blob: 1e0ac8bb246fa01a78fd20d293afbbcbf27200cf [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Vladislav Zolotarov3359fce2010-02-17 13:35:01 -08003 * Copyright (c) 2007-2010 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080041#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070045#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020046#include <linux/workqueue.h>
47#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070048#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020049#include <linux/prefetch.h>
50#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020051#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000052#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020053
Eilon Greenstein359d8b12009-02-12 08:38:25 +000054
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020055#include "bnx2x.h"
56#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070057#include "bnx2x_init_ops.h"
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000058#include "bnx2x_dump.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020059
Vladislav Zolotarova03b1a52010-04-19 01:15:17 +000060#define DRV_MODULE_VERSION "1.52.53-1"
61#define DRV_MODULE_RELDATE "2010/18/04"
Eilon Greenstein34f80b02008-06-23 20:33:01 -070062#define BNX2X_BC_VER 0x040200
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020063
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070064#include <linux/firmware.h>
65#include "bnx2x_fw_file_hdr.h"
66/* FW files */
Ben Hutchings45229b42009-11-07 11:53:39 +000067#define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070074
Eilon Greenstein34f80b02008-06-23 20:33:01 -070075/* Time in jiffies before concluding the transmitter is hung */
76#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020077
/* One-line banner printed at module load time. */
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Advertise the firmware blobs this driver request_firmware()s so
 * userspace tooling (e.g. initramfs builders) can bundle them. */
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020088
/* --- module parameters: tuning/debug knobs, sampled at probe time --- */

/* Multi-queue (RSS) operation; enabled by default. */
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

/* Number of RSS queues; 0 means "one per CPU". */
static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is as a number of CPUs)");

/* Disable TPA (transparent packet aggregation / LRO). */
static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

/* Force a non-MSI-X interrupt mode (0 = auto/MSI-X preferred). */
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
			   "(1 INT#x; 2 MSI)");

/* Generate pause frames instead of dropping when host rings fill up. */
static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

/* Timer-driven polling instead of interrupts (debug only). */
static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

/* Force PCIe Max Read Request Size; -1 leaves the BIOS/default value. */
static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

/* Initial netif msglevel debug mask. */
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

/* Reference counts for shared init: 0-common, 1-port0, 2-port1 */
static int load_count[3];

/* Driver-private workqueue (slowpath task deferral). */
static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200127
/* Board types; used both as index into board_info[] and as the
 * driver_data cookie in the PCI device table below. */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
152
153/****************************************************************************
154* General service functions
155****************************************************************************/
156
/* Indirectly write @val to GRC register @addr through the PCI
 * config-space window (PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA).
 *
 * Used only at init; serialization is provided by the MCP, so no
 * local locking is taken here.
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	/* Park the window on a benign offset so stray config-space
	 * accesses do not hit an arbitrary GRC address. */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
167
/* Indirectly read a GRC register @addr through the PCI config-space
 * window; counterpart of bnx2x_reg_wr_ind() (same init-only, MCP-locked
 * usage constraints).
 */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	/* restore the window to a benign offset (see bnx2x_reg_wr_ind) */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200179
/* "GO" doorbell registers for the 16 DMAE command slots; indexed by the
 * command slot number passed to bnx2x_post_dmae(). */
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
186
/* copy command into DMAE command memory and set DMAE command go */
/* Writes @dmae word-by-word into the device's command memory slot @idx,
 * then rings that slot's GO register to start the transfer. Callers are
 * responsible for serialization (see bp->dmae_mutex users). */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	/* kick the engine for this command slot */
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
203
/* DMA @len32 dwords from host memory at @dma_addr to GRC address
 * @dst_addr using the DMAE engine.  Falls back to indirect register
 * writes while the engine is not yet initialized (!bp->dmae_ready).
 * Completion is detected by polling the write-back word @wb_comp,
 * with a bounded retry count to avoid hanging forever on HW failure.
 */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;	/* poll budget for completion */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* PCI -> GRC copy, with completion written back to host (PCI) */
	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;	/* GRC addresses are in dwords */
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* only one DMAE slowpath command may be in flight at a time */
	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
277
/* DMA @len32 dwords from GRC address @src_addr into the slowpath
 * write-back buffer (bnx2x_sp wb_data) using the DMAE engine.  Mirror
 * of bnx2x_write_dmae(): falls back to indirect register reads before
 * the engine is ready, and polls @wb_comp with a bounded retry count.
 */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;	/* poll budget for completion */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* GRC -> PCI copy, with completion written back to host (PCI) */
	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;	/* GRC addresses are in dwords */
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	/* only one DMAE slowpath command may be in flight at a time */
	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200352
Eilon Greenstein573f2032009-08-12 08:24:14 +0000353void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
354 u32 addr, u32 len)
355{
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000356 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
Eilon Greenstein573f2032009-08-12 08:24:14 +0000357 int offset = 0;
358
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000359 while (len > dmae_wr_max) {
Eilon Greenstein573f2032009-08-12 08:24:14 +0000360 bnx2x_write_dmae(bp, phys_addr + offset,
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000361 addr + offset, dmae_wr_max);
362 offset += dmae_wr_max * 4;
363 len -= dmae_wr_max;
Eilon Greenstein573f2032009-08-12 08:24:14 +0000364 }
365
366 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
367}
368
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700369/* used only for slowpath so not inlined */
370static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
371{
372 u32 wb_write[2];
373
374 wb_write[0] = val_hi;
375 wb_write[1] = val_lo;
376 REG_WR_DMAE(bp, reg, wb_write, 2);
377}
378
#ifdef USE_WB_RD
/* Read a 64-bit wide-bus register @reg via a 2-dword DMAE transfer.
 * Compiled out unless USE_WB_RD is defined. */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
389
/* Scan the assert lists of the four STORM microcode processors
 * (XSTORM, TSTORM, CSTORM, USTORM) and print every recorded assert.
 * Each list is walked until the first entry whose opcode word equals
 * COMMON_ASM_INVALID_ASSERT_OPCODE (end-of-list marker).
 * Returns the total number of asserts found (0 = none).
 */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		/* each assert entry is 4 consecutive dwords */
		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800510
/* Dump the bootcode (MCP) trace buffer to the kernel log.
 * A "mark" word just below the shared-memory base points into the MCP
 * scratchpad; the buffer is printed in two halves (mark..shmem_base,
 * then start..mark) — presumably because it is a circular buffer and
 * the mark is the current write position (TODO confirm against MCP
 * firmware docs).  No-op when there is no MCP.
 */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	/* mark word lives 0x800 below shmem_base (+4 skips a header word) */
	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	/* translate the mark into a GRC scratchpad address, dword-aligned */
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	/* data[] holds 8 dwords of trace text + a NUL terminator word;
	 * htonl() keeps the byte order of the ASCII payload intact */
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
543
/* Emit a full crash dump to the kernel log: driver/status-block
 * indices, a window of each Rx/Tx ring around the current consumer,
 * the firmware trace (bnx2x_fw_dump) and the STORM assert lists
 * (bnx2x_mc_assert).  Statistics are disabled first so the stats
 * state machine stops touching the hardware while we dump.
 */
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
		  " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
			  " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* a window of BDs around the current consumer */
		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
656
/* Enable host-coalescing (HC) interrupts for the port owned by @bp.
 *
 * Programs HC_REG_CONFIG_0/1 according to the interrupt mode currently
 * in use (MSI-X, MSI or INTx) and, on E1H chips, initializes the
 * leading/trailing edge registers so attention bits are latched.
 */
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	/* each port has its own HC config register */
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		/* MSI-X: one vector per status block, no INTx line */
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		/* MSI: single ISR, message-signalled, no INTx line */
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		/* INTx: the register is written twice - first with the
		 * MSI/MSI-X enable bit also set, then without it.
		 * NOTE(review): presumably a HW initialization quirk -
		 * confirm against the HC programming spec before
		 * changing this sequence.
		 */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			/* enable this VN's bit in the edge registers */
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
716
/* Disable host-coalescing interrupts for this port.
 *
 * Clears every interrupt-enable bit in HC_REG_CONFIG_0/1 and reads the
 * register back to verify the write reached the IGU.
 */
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* clear single-ISR, MSI/MSI-X, INTx and attention enables */
	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	/* read back to confirm the disable actually took effect */
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
738
/* Disable interrupts and wait until all ISRs and the slowpath task
 * have finished running.
 *
 * @disable_hw: if non-zero, also mask interrupts at the HC so the
 *              hardware stops generating them (software masking via
 *              intr_sem happens unconditionally).
 *
 * Ordering matters: intr_sem is raised (and published with smp_wmb)
 * before the HW disable, so any ISR that still fires sees the
 * semaphore and bails out.
 */
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is the slowpath; queue vectors follow, with an
		 * extra CNIC vector in between when BCM_CNIC is built in */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
768
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700769/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200770
771/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700772 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200773 */
774
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +0000775/* Return true if succeeded to acquire the lock */
776static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
777{
778 u32 lock_status;
779 u32 resource_bit = (1 << resource);
780 int func = BP_FUNC(bp);
781 u32 hw_lock_control_reg;
782
783 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
784
785 /* Validating that the resource is within range */
786 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
787 DP(NETIF_MSG_HW,
788 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
789 resource, HW_LOCK_MAX_RESOURCE_VALUE);
790 return -EINVAL;
791 }
792
793 if (func <= 5)
794 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
795 else
796 hw_lock_control_reg =
797 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
798
799 /* Try to acquire the lock */
800 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
801 lock_status = REG_RD(bp, hw_lock_control_reg);
802 if (lock_status & resource_bit)
803 return true;
804
805 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
806 return false;
807}
808
/* Acknowledge a status block to the IGU.
 *
 * Builds an igu_ack_register from @sb_id/@storm/@index/@op/@update and
 * writes it as a single 32-bit MMIO store to the port's INT_ACK
 * command register.
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	/* the struct is written to HW as one raw 32-bit word */
	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
831
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000832static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200833{
834 struct host_status_block *fpsb = fp->status_blk;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200835
836 barrier(); /* status block is written to by the chip */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000837 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
838 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200839}
840
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200841static u16 bnx2x_ack_int(struct bnx2x *bp)
842{
Eilon Greenstein5c862842008-08-13 15:51:48 -0700843 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
844 COMMAND_REG_SIMD_MASK);
845 u32 result = REG_RD(bp, hc_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200846
Eilon Greenstein5c862842008-08-13 15:51:48 -0700847 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
848 result, hc_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200849
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200850 return result;
851}
852
853
854/*
855 * fast path service functions
856 */
857
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -0800858static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
859{
860 /* Tell compiler that consumer and producer can change */
861 barrier();
862 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
Eilon Greenstein237907c2009-01-14 06:42:44 +0000863}
864
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200865/* free skb in the packet ring at pos idx
866 * return idx of last bd freed
867 */
868static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
869 u16 idx)
870{
871 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
Eilon Greensteinca003922009-08-12 22:53:28 -0700872 struct eth_tx_start_bd *tx_start_bd;
873 struct eth_tx_bd *tx_data_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200874 struct sk_buff *skb = tx_buf->skb;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700875 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200876 int nbd;
877
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000878 /* prefetch skb end pointer to speedup dev_kfree_skb() */
879 prefetch(&skb->end);
880
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200881 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
882 idx, tx_buf, skb);
883
884 /* unmap first bd */
885 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
Eilon Greensteinca003922009-08-12 22:53:28 -0700886 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
FUJITA Tomonori1a983142010-04-04 01:51:03 +0000887 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
Eilon Greensteinca003922009-08-12 22:53:28 -0700888 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200889
Eilon Greensteinca003922009-08-12 22:53:28 -0700890 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200891#ifdef BNX2X_STOP_ON_ERROR
Eilon Greensteinca003922009-08-12 22:53:28 -0700892 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700893 BNX2X_ERR("BAD nbd!\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200894 bnx2x_panic();
895 }
896#endif
Eilon Greensteinca003922009-08-12 22:53:28 -0700897 new_cons = nbd + tx_buf->first_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200898
Eilon Greensteinca003922009-08-12 22:53:28 -0700899 /* Get the next bd */
900 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
901
902 /* Skip a parse bd... */
903 --nbd;
904 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
905
906 /* ...and the TSO split header bd since they have no mapping */
907 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
908 --nbd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200909 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200910 }
911
912 /* now free frags */
913 while (nbd > 0) {
914
915 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
Eilon Greensteinca003922009-08-12 22:53:28 -0700916 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
FUJITA Tomonori1a983142010-04-04 01:51:03 +0000917 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
918 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200919 if (--nbd)
920 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
921 }
922
923 /* release skb */
Ilpo Järvinen53e5e962008-07-25 21:40:45 -0700924 WARN_ON(!skb);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000925 dev_kfree_skb(skb);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200926 tx_buf->first_bd = 0;
927 tx_buf->skb = NULL;
928
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700929 return new_cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200930}
931
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700932static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200933{
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700934 s16 used;
935 u16 prod;
936 u16 cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200937
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200938 prod = fp->tx_bd_prod;
939 cons = fp->tx_bd_cons;
940
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700941 /* NUM_TX_RINGS = number of "next-page" entries
942 It will be used as a threshold */
943 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200944
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700945#ifdef BNX2X_STOP_ON_ERROR
Ilpo Järvinen53e5e962008-07-25 21:40:45 -0700946 WARN_ON(used < 0);
947 WARN_ON(used > fp->bp->tx_ring_size);
948 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700949#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200950
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700951 return (s16)(fp->bp->tx_ring_size) - used;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200952}
953
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000954static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
955{
956 u16 hw_cons;
957
958 /* Tell compiler that status block fields can change */
959 barrier();
960 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
961 return hw_cons != fp->tx_pkt_cons;
962}
963
/* Reap TX completions on a fastpath ring.
 *
 * Frees every packet between the software consumer and the chip's
 * consumer from the status block, publishes the new consumer indices,
 * and wakes the netdev TX queue if it was stopped and enough room is
 * now available.  Returns 0, or -1 under BNX2X_STOP_ON_ERROR panic.
 */
static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	/* free every completed packet up to the chip's consumer */
	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		/* re-check under the lock; only wake when the device is
		 * open and there is room for a worst-case packet */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
1032
Michael Chan993ac7b2009-10-10 13:46:56 +00001033#ifdef BCM_CNIC
1034static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1035#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -07001036
/* Handle a slowpath (ramrod) completion delivered on an RX CQE.
 *
 * Decodes the connection id and command, then advances either the
 * fastpath state machine (non-leading queues, fp->index != 0) or the
 * global bp->state machine (leading queue).  Each transition ends with
 * mb() so bnx2x_wait_ramrod() polling the state sees the change.
 */
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* a slowpath queue slot has been released */
	bp->spq_left++;

	if (fp->index) {
		/* per-queue ramrods: command is matched together with the
		 * current fastpath state */
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	/* leading-queue / global ramrods */
	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		/* publish the decrement before waiters re-check it */
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
1120
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001121static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1122 struct bnx2x_fastpath *fp, u16 index)
1123{
1124 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1125 struct page *page = sw_buf->page;
1126 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1127
1128 /* Skip "next page" elements */
1129 if (!page)
1130 return;
1131
FUJITA Tomonori1a983142010-04-04 01:51:03 +00001132 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001133 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001134 __free_pages(page, PAGES_PER_SGE_SHIFT);
1135
1136 sw_buf->page = NULL;
1137 sge->addr_hi = 0;
1138 sge->addr_lo = 0;
1139}
1140
/* Free RX SGE entries [0, last) on the given fastpath. */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int idx;

	for (idx = 0; idx < last; idx++)
		bnx2x_free_rx_sge(bp, fp, idx);
}
1149
1150static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1151 struct bnx2x_fastpath *fp, u16 index)
1152{
1153 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1154 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1155 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1156 dma_addr_t mapping;
1157
1158 if (unlikely(page == NULL))
1159 return -ENOMEM;
1160
FUJITA Tomonori1a983142010-04-04 01:51:03 +00001161 mapping = dma_map_page(&bp->pdev->dev, page, 0,
1162 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001163 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001164 __free_pages(page, PAGES_PER_SGE_SHIFT);
1165 return -ENOMEM;
1166 }
1167
1168 sw_buf->page = page;
FUJITA Tomonori1a983142010-04-04 01:51:03 +00001169 dma_unmap_addr_set(sw_buf, mapping, mapping);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001170
1171 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1172 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1173
1174 return 0;
1175}
1176
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001177static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1178 struct bnx2x_fastpath *fp, u16 index)
1179{
1180 struct sk_buff *skb;
1181 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1182 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1183 dma_addr_t mapping;
1184
1185 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1186 if (unlikely(skb == NULL))
1187 return -ENOMEM;
1188
FUJITA Tomonori1a983142010-04-04 01:51:03 +00001189 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
1190 DMA_FROM_DEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001191 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001192 dev_kfree_skb(skb);
1193 return -ENOMEM;
1194 }
1195
1196 rx_buf->skb = skb;
FUJITA Tomonori1a983142010-04-04 01:51:03 +00001197 dma_unmap_addr_set(rx_buf, mapping, mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001198
1199 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1200 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1201
1202 return 0;
1203}
1204
1205/* note that we are not allocating a new skb,
1206 * we are just moving one from cons to prod
1207 * we are not creating a new mapping,
1208 * so there is no need to check for dma_mapping_error().
1209 */
1210static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1211 struct sk_buff *skb, u16 cons, u16 prod)
1212{
1213 struct bnx2x *bp = fp->bp;
1214 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1215 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1216 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1217 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1218
FUJITA Tomonori1a983142010-04-04 01:51:03 +00001219 dma_sync_single_for_device(&bp->pdev->dev,
1220 dma_unmap_addr(cons_rx_buf, mapping),
1221 RX_COPY_THRESH, DMA_FROM_DEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001222
1223 prod_rx_buf->skb = cons_rx_buf->skb;
FUJITA Tomonori1a983142010-04-04 01:51:03 +00001224 dma_unmap_addr_set(prod_rx_buf, mapping,
1225 dma_unmap_addr(cons_rx_buf, mapping));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001226 *prod_bd = *cons_bd;
1227}
1228
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001229static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1230 u16 idx)
1231{
1232 u16 last_max = fp->last_max_sge;
1233
1234 if (SUB_S16(idx, last_max) > 0)
1235 fp->last_max_sge = idx;
1236}
1237
1238static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1239{
1240 int i, j;
1241
1242 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1243 int idx = RX_SGE_CNT * i - 1;
1244
1245 for (j = 0; j < 2; j++) {
1246 SGE_MASK_CLEAR_BIT(fp, idx);
1247 idx--;
1248 }
1249 }
1250}
1251
/* Update the SGE producer after a TPA aggregation completes.
 *
 * Marks all SGE entries consumed by this CQE as used (bit cleared),
 * then walks the mask from the current producer forward, re-arming
 * fully-consumed 64-entry mask elements and advancing rx_sge_prod by
 * the number of entries handed back to the chip.
 */
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	/* number of SGE entries (pages) used by this packet's frags */
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		/* stop at the first mask element that still has used bits */
		if (likely(fp->sge_mask[i]))
			break;

		/* element fully consumed: re-arm it and credit the prod */
		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
1304
1305static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1306{
1307 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1308 memset(fp->sge_mask, 0xff,
1309 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1310
Eilon Greenstein33471622008-08-13 15:59:08 -07001311 /* Clear the two last indices in the page to 1:
1312 these are the indices that correspond to the "next" element,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001313 hence will never be indicated and should be removed from
1314 the calculations. */
1315 bnx2x_clear_sge_mask_next_elems(fp);
1316}
1317
/* Begin a TPA (LRO) aggregation in bin @queue.
 *
 * Swaps buffers: the empty skb parked in tpa_pool[queue] is mapped and
 * placed at the producer slot, while the partially-filled skb at the
 * consumer slot moves into the pool (still mapped) to accumulate the
 * aggregation until bnx2x_tpa_stop().
 */
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	/* NOTE(review): mapping errors are not checked here, unlike the
	 * alloc paths - presumably acceptable on this path; verify. */
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
1356
/* Attach the SGE pages of a completed TPA aggregation to @skb as page
 * fragments.
 *
 * Walks the SGL in @fp_cqe, replaces each consumed ring page with a
 * freshly allocated one, unmaps the old page and appends it to the skb.
 * Returns 0 on success or a negative errno if a replacement page could
 * not be allocated (the packet is then dropped by the caller).
 */
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	/* bytes carried in SGE pages = total packet minus first-BD part */
	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		/* keep a copy; the ring slot is about to be refilled */
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
1422
/* Complete the TPA aggregation in @queue and hand the aggregated skb to
 * the stack.
 *
 * A replacement skb is allocated up front and the pool entry is
 * DMA-unmapped unconditionally, because the pool state becomes
 * BNX2X_TPA_STOP either way.  @pad/@len describe the linear part of the
 * aggregated skb; @cqe is the terminating CQE carrying the SGL and VLAN
 * information.  If the replacement allocation fails, the packet is
 * dropped and the old skb stays in the pool.
 */
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		/* VLAN-tagged CQE, and whether HW VLAN stripping is off
		   (tag then still sits in the packet data) */
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			/* Rebuild the IP header checksum for the
			   aggregated (now larger) packet */
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		/* Attach the SGE pages as fragments; on failure the whole
		   aggregated packet is dropped */
		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
1512
/* Publish new Rx BD/CQE/SGE producer values for this fastpath client to
 * the FW (USTORM producers area), after ensuring ring contents are
 * visible to the device.  Barrier placement here is mandatory - do not
 * reorder these statements.
 */
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	/* The producers struct is written to the chip one u32 at a time */
	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
1547
Vladislav Zolotarov6f3c72a2010-07-06 04:09:43 +00001548/* Set Toeplitz hash value in the skb using the value from the
1549 * CQE (calculated by HW).
1550 */
1551static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
1552 struct sk_buff *skb)
1553{
1554 /* Set Toeplitz hash from CQE */
1555 if ((bp->dev->features & NETIF_F_RXHASH) &&
1556 (cqe->fast_path_cqe.status_flags &
1557 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1558 skb->rxhash =
1559 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
1560}
1561
/* Rx fastpath poll: process up to @budget completions from this
 * queue's completion ring.
 *
 * Walks the RCQ from the SW consumer to the HW consumer taken from the
 * status block.  Slowpath CQEs are dispatched to bnx2x_sp_event();
 * TPA start/end CQEs go through the tpa_start/tpa_stop paths; regular
 * packets are either copied (small packet, jumbo MTU) or unmapped and
 * passed to GRO, with a fresh skb posted to the BD ring.  Finally the
 * new consumer/producer values are written back to fp and published to
 * the FW via bnx2x_update_rx_prod().
 *
 * Returns the number of packets processed (<= @budget).
 */
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);

					/* Set Toeplitz hash for an LRO skb */
					bnx2x_set_skb_rxhash(bp, cqe, skb);

					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			/* Sync only the header area the CPU may read below */
			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
						   DMA_FROM_DEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				/* old skb stays on the ring */
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				/* fresh skb posted - hand the old one up */
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Set Toeplitz hash for a none-LRO skb */
			bnx2x_set_skb_rxhash(bp, cqe, skb);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_gro_receive(&fp->napi, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
		else
#endif
			napi_gro_receive(&fp->napi, skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
1800
/* MSI-X fastpath interrupt handler: one vector per fastpath queue.
 * Acks the status block with IGU_INT_DISABLE (no further interrupts
 * for this SB until NAPI re-enables them) and schedules this queue's
 * NAPI context, which does the actual Rx/Tx processing.
 */
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->u_status_block.status_block_index);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
1830
/* Legacy INTx / single-MSI interrupt handler shared by all queues,
 * the slowpath and (optionally) CNIC.  The acked @status is a bitmap:
 * bit 0 signals slowpath work, and bit (1 + sb_id) maps to each
 * fastpath status block; serviced bits are cleared as we go so any
 * leftover indicates an unexpected source.
 */
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Schedule NAPI for every fastpath queue whose SB bit is set */
	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	/* Dispatch CNIC's status block to its registered handler (RCU
	   protects against concurrent cnic_ops unregistration) */
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* Slowpath work is deferred to the sp workqueue */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
1902
1903/* end of fast path */
1904
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001905static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001906
1907/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001908
1909/*
1910 * General service functions
1911 */
1912
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001913static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001914{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001915 u32 lock_status;
1916 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001917 int func = BP_FUNC(bp);
1918 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001919 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001920
1921 /* Validating that the resource is within range */
1922 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1923 DP(NETIF_MSG_HW,
1924 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1925 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1926 return -EINVAL;
1927 }
1928
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001929 if (func <= 5) {
1930 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1931 } else {
1932 hw_lock_control_reg =
1933 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1934 }
1935
Eliezer Tamirf1410642008-02-28 11:51:50 -08001936 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001937 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001938 if (lock_status & resource_bit) {
1939 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1940 lock_status, resource_bit);
1941 return -EEXIST;
1942 }
1943
Eilon Greenstein46230472008-08-25 15:23:30 -07001944 /* Try for 5 second every 5ms */
1945 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001946 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001947 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1948 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001949 if (lock_status & resource_bit)
1950 return 0;
1951
1952 msleep(5);
1953 }
1954 DP(NETIF_MSG_HW, "Timeout\n");
1955 return -EAGAIN;
1956}
1957
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001958static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001959{
1960 u32 lock_status;
1961 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001962 int func = BP_FUNC(bp);
1963 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001964
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001965 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1966
Eliezer Tamirf1410642008-02-28 11:51:50 -08001967 /* Validating that the resource is within range */
1968 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1969 DP(NETIF_MSG_HW,
1970 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1971 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1972 return -EINVAL;
1973 }
1974
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001975 if (func <= 5) {
1976 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1977 } else {
1978 hw_lock_control_reg =
1979 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1980 }
1981
Eliezer Tamirf1410642008-02-28 11:51:50 -08001982 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001983 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001984 if (!(lock_status & resource_bit)) {
1985 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1986 lock_status, resource_bit);
1987 return -EFAULT;
1988 }
1989
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001990 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001991 return 0;
1992}
1993
/* HW Lock for shared dual port PHYs */
/* Serialize MDIO access: take the per-port software mutex first, then
 * (only on boards that need it) the MDIO HW lock.  Must be paired with
 * bnx2x_release_phy_lock(), which undoes both in reverse order. */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}
2002
/* Release MDIO serialization in reverse order of acquisition:
 * drop the MDIO HW lock (if it was taken), then the port mutex. */
static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
2010
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00002011int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2012{
2013 /* The GPIO should be swapped if swap register is set and active */
2014 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2015 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2016 int gpio_shift = gpio_num +
2017 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2018 u32 gpio_mask = (1 << gpio_shift);
2019 u32 gpio_reg;
2020 int value;
2021
2022 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2023 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2024 return -EINVAL;
2025 }
2026
2027 /* read GPIO value */
2028 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2029
2030 /* get the requested pin value */
2031 if ((gpio_reg & gpio_mask) == gpio_mask)
2032 value = 1;
2033 else
2034 value = 0;
2035
2036 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2037
2038 return value;
2039}
2040
/* Configure GPIO pin @gpio_num on @port: drive low, drive high, or
 * float (input/hi-Z) according to @mode.  The read-modify-write of
 * MISC_REG_GPIO is done under the GPIO HW lock; an unrecognized mode
 * leaves the register value unchanged (but still rewrites it).
 * Returns 0, or -EINVAL for an out-of-range pin.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: no bits are modified */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
2093
/* Set or clear the interrupt output for GPIO pin @gpio_num on @port via
 * MISC_REG_GPIO_INT.  The read-modify-write is done under the GPIO HW
 * lock; an unrecognized mode leaves the register value unchanged (but
 * still rewrites it).  Returns 0, or -EINVAL for an out-of-range pin.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		/* unknown mode: no bits are modified */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
2139
/* Configure shared SPIO pin @spio_num (valid range SPIO_4..SPIO_7):
 * drive low, drive high, or float (input/hi-Z) according to @mode.
 * The read-modify-write of MISC_REG_SPIO is done under the SPIO HW
 * lock; an unrecognized mode leaves the register value unchanged (but
 * still rewrites it).  Returns 0, or -EINVAL for an out-of-range pin.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: no bits are modified */
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
2185
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002186static void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002187{
Eilon Greensteinad33ea32009-01-14 21:24:57 -08002188 switch (bp->link_vars.ieee_fc &
2189 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002190 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002191 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002192 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002193 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002194
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002195 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002196 bp->port.advertising |= (ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002197 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002198 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002199
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002200 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002201 bp->port.advertising |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08002202 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002203
Eliezer Tamirf1410642008-02-28 11:51:50 -08002204 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002205 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002206 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002207 break;
2208 }
2209}
2210
/*
 * Print the current link state to the kernel log and keep the netdev
 * carrier flag in sync.
 *
 * The "Link is Up" message is assembled from several pr_cont() calls, so
 * the order of the prints below is part of the output format.
 */
static void bnx2x_link_report(struct bnx2x *bp)
{
	/* function disabled via MF configuration - always report down */
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");

		line_speed = bp->link_vars.line_speed;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			/* in multi-function mode report at most this
			 * function's configured max bandwidth */
			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
		pr_cont("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}
2261
/*
 * Bring up the PHY/link for the first time after load.
 *
 * @bp:        driver instance
 * @load_mode: LOAD_DIAG selects XGXS loopback for self-test; other modes
 *             leave loopback_mode untouched
 *
 * Returns the bnx2x_phy_init() result, or -EINVAL when there is no
 * management firmware (bootcode) to drive the link.
 */
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		/* on emulation/FPGA the link may already be up here -
		 * kick statistics and report it immediately */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
2296
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002297static void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002298{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002299 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002300 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002301 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002302 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002303
Eilon Greenstein19680c42008-08-13 15:47:33 -07002304 bnx2x_calc_fc_adv(bp);
2305 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00002306 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002307}
2308
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002309static void bnx2x__link_reset(struct bnx2x *bp)
2310{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002311 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002312 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00002313 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002314 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002315 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00002316 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002317}
2318
2319static u8 bnx2x_link_test(struct bnx2x *bp)
2320{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00002321 u8 rc = 0;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002322
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00002323 if (!BP_NOMCP(bp)) {
2324 bnx2x_acquire_phy_lock(bp);
2325 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2326 bnx2x_release_phy_lock(bp);
2327 } else
2328 BNX2X_ERR("Bootcode is missing - can not test link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002329
2330 return rc;
2331}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002332
/*
 * Initialize the per-port rate-shaping and fairness parameters in
 * bp->cmng from the current line speed.
 *
 * NOTE(review): assumes bp->link_vars.line_speed is non-zero (it is used
 * as a divisor) - callers appear to invoke this only when link is up.
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
2367
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		/* functions are interleaved per port: func = 2*vn + port */
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
2413
/*
 * Build the per-VN rate-shaping and fairness structures for @func from
 * the shared-memory MF configuration and write them into the XSTORM
 * internal memory of the chip.
 *
 * Expects bnx2x_init_port_minmax() / bnx2x_calc_vn_weight_sum() to have
 * populated bp->cmng and bp->vn_weight_sum first.
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		/* config fields hold bandwidth in 100 Mbps units */
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory (word by word) */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
2475
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002476
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* remember the status from before the update so we only report
	 * real changes below */
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			/* tell the USTORM firmware whether TX pause is on */
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
2550
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002551static void bnx2x__link_status_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002552{
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002553 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002554 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002555
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002556 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2557
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002558 if (bp->link_vars.link_up)
2559 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2560 else
2561 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2562
Eilon Greenstein2691d512009-08-12 08:22:08 +00002563 bnx2x_calc_vn_weight_sum(bp);
2564
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002565 /* indicate link status */
2566 bnx2x_link_report(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002567}
2568
/*
 * Mark this function as the Port Management Function (PMF), enable the
 * NIG attention bits for it and kick the statistics state machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2584
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002585/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002586
2587/* slow path */
2588
2589/*
2590 * General service functions
2591 */
2592
/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	/* each request carries a new sequence number so the reply can be
	 * matched against it */
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	/* only one outstanding mailbox exchange at a time */
	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2631
Michael Chane665bfd2009-10-10 13:46:54 +00002632static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002633static void bnx2x_set_rx_mode(struct net_device *dev);
2634
/*
 * Quiesce an E1H function that the MCP disabled: stop the TX queues,
 * close the per-port LLH door in the NIG and drop carrier.
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
2645
/*
 * Re-enable an E1H function previously disabled by the MCP: reopen the
 * per-port LLH door in the NIG and wake the TX queues. Carrier is left
 * alone on purpose (see comment below).
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
2660
/*
 * Recompute all rate-shaping/fairness settings (port and every VN)
 * after a bandwidth-allocation change, and - if we are the PMF - signal
 * the other drivers on the port and push the new values to the chip.
 */
static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}
2694
/*
 * Handle a DCC (Driver Control Command) notification from the MCP:
 * enable/disable this PF and/or re-read the bandwidth allocation, then
 * acknowledge the result back to the firmware.
 *
 * @dcc_event: DRV_STATUS_DCC_* bits; any bit left set after handling is
 *             reported to the MCP as a failure.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}
2731
Michael Chan28912902009-10-10 13:46:53 +00002732/* must be called under the spq lock */
2733static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2734{
2735 struct eth_spe *next_spe = bp->spq_prod_bd;
2736
2737 if (bp->spq_prod_bd == bp->spq_last_bd) {
2738 bp->spq_prod_bd = bp->spq;
2739 bp->spq_prod_idx = 0;
2740 DP(NETIF_MSG_TIMER, "end of spq\n");
2741 } else {
2742 bp->spq_prod_bd++;
2743 bp->spq_prod_idx++;
2744 }
2745 return next_spe;
2746}
2747
/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	/* flush the posted write before the lock is dropped */
	mmiowb();
}
2760
/* the slow path queue is odd since completions arrive on the fastpath ring */
/*
 * Post a single slow-path element (ramrod) on the SPQ.
 *
 * @command: SPE command id
 * @cid:     connection id (combined with the function via HW_CID)
 * @data_hi/@data_lo: command payload (e.g. a DMA address)
 * @common:  non-zero marks the ramrod as a COMMON one
 *
 * Returns 0 on success, -EIO on panic, -EBUSY (after bnx2x_panic())
 * when the ring is full.
 */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded into it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2808
2809/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002810static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002811{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002812 u32 j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002813 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002814
2815 might_sleep();
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002816 for (j = 0; j < 1000; j++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002817 val = (1UL << 31);
2818 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2819 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2820 if (val & (1L << 31))
2821 break;
2822
2823 msleep(5);
2824 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002825 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002826 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002827 rc = -EBUSY;
2828 }
2829
2830 return rc;
2831}
2832
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	/* clearing the register (including bit 31) drops the lock */
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
2838
/*
 * Latch the indices of the default status block written by the chip and
 * return a bitmask of which sections changed:
 *   1 - attention bits, 2 - CSTORM, 4 - USTORM, 8 - XSTORM, 16 - TSTORM.
 * Returns 0 when nothing changed.
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
2867
2868/*
2869 * slow path service functions
2870 */
2871
/* Handle newly-asserted attention bits.
 *
 * Masks the asserted bits in the AEU (under the per-port attention-mask
 * HW lock), records them in bp->attn_state, services the hard-wired
 * attention sources (NIG/link, GPIOs, general attentions), and finally
 * writes the bits to the HC "set" command register to acknowledge them.
 *
 * NOTE(review): the NIG interrupt is masked around bnx2x_link_attn()
 * and only restored after the HC write below - keep that ordering.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	/* per-port HC command register used to acknowledge the bits */
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	/* a bit cannot be newly asserted if we already saw it asserted */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* mask the asserted sources in the AEU; the HW lock serializes
	 * against the MCP / other port touching the same mask register */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1;
		 * clear each one in the AEU after logging it */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	/* acknowledge the asserted bits to the HC */
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2967
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00002968static inline void bnx2x_fan_failure(struct bnx2x *bp)
2969{
2970 int port = BP_PORT(bp);
2971
2972 /* mark the failure */
2973 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2974 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2975 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2976 bp->link_params.ext_phy_config);
2977
2978 /* log the failure */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002979 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2980 " the driver to shutdown the card to prevent permanent"
2981 " damage. Please contact OEM Support for assistance\n");
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00002982}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002983
/* Service group-0 deasserted attentions: SPIO5 (fan failure), GPIO3
 * module-detect events, and fatal HW-block attentions in set 0.
 * Panics on any bit in HW_INTERRUT_ASSERT_SET_0.
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* disable the SPIO5 attention so it does not re-fire */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		/* record the failure in shmem and log it */
		bnx2x_fan_failure(bp);
	}

	/* optical module plug/unplug detection (either function) */
	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the fatal sources before panicking */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
3047
/* Service group-1 deasserted attentions: doorbell-queue (DORQ) errors
 * and fatal HW-block attentions in set 1.  Panics on any bit in
 * HW_INTERRUT_ASSERT_SET_1.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* reading the STS_CLR register also clears the status */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the fatal sources before panicking */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
3078
/* Service group-2 deasserted attentions: CFC and PXP block errors and
 * fatal HW-block attentions in set 2.  Panics on any bit in
 * HW_INTERRUT_ASSERT_SET_2.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		/* reading the STS_CLR register also clears the status */
		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the fatal sources before panicking */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
3118
/* Service group-3 deasserted attentions: general attentions (PMF link
 * event / DCC, MC assert, MCP assert) and latched attentions (GRC
 * timeout / reserved).  Panics on an MC assert.
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* clear the per-function general attention */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* re-read the MF config and driver status from
			 * shared memory - the MCP may have changed them */
			bp->mf_config = SHMEM_RD(bp,
					mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			/* become the PMF if the MCP nominated us */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* the GRC attention registers exist on E1H only */
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* clear all latched attentions */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3173
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003174static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3175static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3176
3177
/* Parity-error recovery state is kept in the GENERIC_POR_1 register,
 * which is shared by all functions: the low LOAD_COUNTER_BITS bits
 * hold the global load counter and the bit at RESET_DONE_FLAG_SHIFT
 * is the "reset in progress" flag (clear = reset done).
 */
#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3184/*
3185 * should be run under rtnl lock
3186 */
3187static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3188{
3189 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3190 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3191 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3192 barrier();
3193 mmiowb();
3194}
3195
3196/*
3197 * should be run under rtnl lock
3198 */
3199static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3200{
3201 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3202 val |= (1 << 16);
3203 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3204 barrier();
3205 mmiowb();
3206}
3207
3208/*
3209 * should be run under rtnl lock
3210 */
3211static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3212{
3213 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3214 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3215 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3216}
3217
3218/*
3219 * should be run under rtnl lock
3220 */
3221static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3222{
3223 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3224
3225 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3226
3227 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3228 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3229 barrier();
3230 mmiowb();
3231}
3232
3233/*
3234 * should be run under rtnl lock
3235 */
3236static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3237{
3238 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3239
3240 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3241
3242 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3243 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3244 barrier();
3245 mmiowb();
3246
3247 return val1;
3248}
3249
3250/*
3251 * should be run under rtnl lock
3252 */
3253static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3254{
3255 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3256}
3257
3258static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3259{
3260 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3261 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3262}
3263
/* Print one block name to the continued line, comma-separated after
 * the first entry (idx != 0).
 */
static inline void _print_next_block(int idx, const char *blk)
{
	if (idx != 0)
		pr_cont(", ");
	pr_cont("%s", blk);
}
3270
3271static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3272{
3273 int i = 0;
3274 u32 cur_bit = 0;
3275 for (i = 0; sig; i++) {
3276 cur_bit = ((u32)0x1 << i);
3277 if (sig & cur_bit) {
3278 switch (cur_bit) {
3279 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3280 _print_next_block(par_num++, "BRB");
3281 break;
3282 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3283 _print_next_block(par_num++, "PARSER");
3284 break;
3285 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3286 _print_next_block(par_num++, "TSDM");
3287 break;
3288 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3289 _print_next_block(par_num++, "SEARCHER");
3290 break;
3291 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3292 _print_next_block(par_num++, "TSEMI");
3293 break;
3294 }
3295
3296 /* Clear the bit */
3297 sig &= ~cur_bit;
3298 }
3299 }
3300
3301 return par_num;
3302}
3303
3304static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3305{
3306 int i = 0;
3307 u32 cur_bit = 0;
3308 for (i = 0; sig; i++) {
3309 cur_bit = ((u32)0x1 << i);
3310 if (sig & cur_bit) {
3311 switch (cur_bit) {
3312 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3313 _print_next_block(par_num++, "PBCLIENT");
3314 break;
3315 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3316 _print_next_block(par_num++, "QM");
3317 break;
3318 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3319 _print_next_block(par_num++, "XSDM");
3320 break;
3321 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3322 _print_next_block(par_num++, "XSEMI");
3323 break;
3324 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3325 _print_next_block(par_num++, "DOORBELLQ");
3326 break;
3327 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3328 _print_next_block(par_num++, "VAUX PCI CORE");
3329 break;
3330 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3331 _print_next_block(par_num++, "DEBUG");
3332 break;
3333 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3334 _print_next_block(par_num++, "USDM");
3335 break;
3336 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3337 _print_next_block(par_num++, "USEMI");
3338 break;
3339 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3340 _print_next_block(par_num++, "UPB");
3341 break;
3342 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3343 _print_next_block(par_num++, "CSDM");
3344 break;
3345 }
3346
3347 /* Clear the bit */
3348 sig &= ~cur_bit;
3349 }
3350 }
3351
3352 return par_num;
3353}
3354
3355static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3356{
3357 int i = 0;
3358 u32 cur_bit = 0;
3359 for (i = 0; sig; i++) {
3360 cur_bit = ((u32)0x1 << i);
3361 if (sig & cur_bit) {
3362 switch (cur_bit) {
3363 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3364 _print_next_block(par_num++, "CSEMI");
3365 break;
3366 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3367 _print_next_block(par_num++, "PXP");
3368 break;
3369 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3370 _print_next_block(par_num++,
3371 "PXPPCICLOCKCLIENT");
3372 break;
3373 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3374 _print_next_block(par_num++, "CFC");
3375 break;
3376 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3377 _print_next_block(par_num++, "CDU");
3378 break;
3379 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3380 _print_next_block(par_num++, "IGU");
3381 break;
3382 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3383 _print_next_block(par_num++, "MISC");
3384 break;
3385 }
3386
3387 /* Clear the bit */
3388 sig &= ~cur_bit;
3389 }
3390 }
3391
3392 return par_num;
3393}
3394
3395static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3396{
3397 int i = 0;
3398 u32 cur_bit = 0;
3399 for (i = 0; sig; i++) {
3400 cur_bit = ((u32)0x1 << i);
3401 if (sig & cur_bit) {
3402 switch (cur_bit) {
3403 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3404 _print_next_block(par_num++, "MCP ROM");
3405 break;
3406 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3407 _print_next_block(par_num++, "MCP UMP RX");
3408 break;
3409 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3410 _print_next_block(par_num++, "MCP UMP TX");
3411 break;
3412 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3413 _print_next_block(par_num++, "MCP SCPAD");
3414 break;
3415 }
3416
3417 /* Clear the bit */
3418 sig &= ~cur_bit;
3419 }
3420 }
3421
3422 return par_num;
3423}
3424
3425static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3426 u32 sig2, u32 sig3)
3427{
3428 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3429 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3430 int par_num = 0;
3431 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3432 "[0]:0x%08x [1]:0x%08x "
3433 "[2]:0x%08x [3]:0x%08x\n",
3434 sig0 & HW_PRTY_ASSERT_SET_0,
3435 sig1 & HW_PRTY_ASSERT_SET_1,
3436 sig2 & HW_PRTY_ASSERT_SET_2,
3437 sig3 & HW_PRTY_ASSERT_SET_3);
3438 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3439 bp->dev->name);
3440 par_num = bnx2x_print_blocks_with_parity0(
3441 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3442 par_num = bnx2x_print_blocks_with_parity1(
3443 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3444 par_num = bnx2x_print_blocks_with_parity2(
3445 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3446 par_num = bnx2x_print_blocks_with_parity3(
3447 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3448 printk("\n");
3449 return true;
3450 } else
3451 return false;
3452}
3453
3454static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003455{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003456 struct attn_route attn;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003457 int port = BP_PORT(bp);
3458
3459 attn.sig[0] = REG_RD(bp,
3460 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3461 port*4);
3462 attn.sig[1] = REG_RD(bp,
3463 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3464 port*4);
3465 attn.sig[2] = REG_RD(bp,
3466 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3467 port*4);
3468 attn.sig[3] = REG_RD(bp,
3469 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3470 port*4);
3471
3472 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3473 attn.sig[3]);
3474}
3475
/* Handle newly-deasserted attention bits.
 *
 * Takes the split MCP access lock, checks for parity errors first
 * (starting the recovery flow and returning early if found), then
 * dispatches each deasserted attention group to its per-group handler,
 * acknowledges the bits to the HC, and finally unmasks them again in
 * the AEU and clears them from bp->attn_state.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		/* kick off the recovery flow on the reset task */
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	/* each deasserted bit selects one dynamic attention group; only
	 * the signature bits belonging to that group are handled */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	/* acknowledge the deasserted bits to the HC */
	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	/* a bit cannot deassert unless we had seen it asserted */
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* re-enable (unmask) the sources in the AEU, under the same
	 * HW lock used when they were masked on assertion */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3558
/* Top-level attention dispatcher.
 *
 * Compares the chip-reported attention bits and acks against the
 * driver's cached attn_state to find bits that newly asserted
 * (bit set, not acked, not in state) or newly deasserted (bit clear,
 * acked, in state), and routes each set to its handler.
 */
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
							attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
							attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	/* a bit that differs from its ack must also differ from our
	 * cached state - otherwise we missed a transition */
	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
3586
/* Slow-path work handler (runs on bnx2x_wq via bp->sp_task).
 *
 * Determines which default-status-block sections changed, services HW
 * attentions and CStorm statistics events, then acks all default SB
 * indices to the IGU - re-enabling the slow-path interrupt only on the
 * final (TSTORM) ack.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	/* status is a bitmask of changed SB sections (see
	 * bnx2x_update_dsb_idx) */
	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & 0x1) {
		bnx2x_attn_int(bp);
		status &= ~0x1;
	}

	/* CStorm events: STAT_QUERY */
	if (status & 0x2) {
		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
		status &= ~0x2;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	/* ack every section; only the last ack re-enables the IGU line */
	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}
3631
/* MSI-X slow-path interrupt handler.
 *
 * Disables the slow-path IGU line, hands the event to the CNIC driver
 * when one is registered (BCM_CNIC builds), and defers the real work
 * to bnx2x_sp_task on the driver workqueue.
 */
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* disable the line; bnx2x_sp_task re-enables it when done */
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* cnic_ops is RCU-protected; call its handler if present */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3665
3666/* end of slow path */
3667
3668/* Statistics */
3669
3670/****************************************************************************
3671* Macros
3672****************************************************************************/
3673
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003674/* sum[hi:lo] += add[hi:lo] */
3675#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3676 do { \
3677 s_lo += a_lo; \
Eilon Greensteinf5ba6772009-01-14 21:29:18 -08003678 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003679 } while (0)
3680
3681/* difference = minuend - subtrahend */
3682#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3683 do { \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003684 if (m_lo < s_lo) { \
3685 /* underflow */ \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003686 d_hi = m_hi - s_hi; \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003687 if (d_hi > 0) { \
Eilon Greenstein6378c022008-08-13 15:59:25 -07003688 /* we can 'loan' 1 */ \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003689 d_hi--; \
3690 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003691 } else { \
Eilon Greenstein6378c022008-08-13 15:59:25 -07003692 /* m_hi <= s_hi */ \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003693 d_hi = 0; \
3694 d_lo = 0; \
3695 } \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003696 } else { \
3697 /* m_lo >= s_lo */ \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003698 if (m_hi < s_hi) { \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003699 d_hi = 0; \
3700 d_lo = 0; \
3701 } else { \
Eilon Greenstein6378c022008-08-13 15:59:25 -07003702 /* m_hi >= s_hi */ \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003703 d_hi = m_hi - s_hi; \
3704 d_lo = m_lo - s_lo; \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003705 } \
3706 } \
3707 } while (0)
3708
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003709#define UPDATE_STAT64(s, t) \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003710 do { \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003711 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3712 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3713 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3714 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3715 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3716 pstats->mac_stx[1].t##_lo, diff.lo); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003717 } while (0)
3718
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003719#define UPDATE_STAT64_NIG(s, t) \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003720 do { \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003721 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3722 diff.lo, new->s##_lo, old->s##_lo); \
3723 ADD_64(estats->t##_hi, diff.hi, \
3724 estats->t##_lo, diff.lo); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003725 } while (0)
3726
3727/* sum[hi:lo] += add */
3728#define ADD_EXTEND_64(s_hi, s_lo, a) \
3729 do { \
3730 s_lo += a; \
3731 s_hi += (s_lo < a) ? 1 : 0; \
3732 } while (0)
3733
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003734#define UPDATE_EXTEND_STAT(s) \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003735 do { \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003736 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3737 pstats->mac_stx[1].s##_lo, \
3738 new->s); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003739 } while (0)
3740
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003741#define UPDATE_EXTEND_TSTAT(s, t) \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003742 do { \
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00003743 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3744 old_tclient->s = tclient->s; \
Eilon Greensteinde832a52009-02-12 08:36:33 +00003745 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3746 } while (0)
3747
3748#define UPDATE_EXTEND_USTAT(s, t) \
3749 do { \
3750 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3751 old_uclient->s = uclient->s; \
3752 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003753 } while (0)
3754
3755#define UPDATE_EXTEND_XSTAT(s, t) \
3756 do { \
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00003757 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3758 old_xclient->s = xclient->s; \
Eilon Greensteinde832a52009-02-12 08:36:33 +00003759 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3760 } while (0)
3761
3762/* minuend -= subtrahend */
3763#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3764 do { \
3765 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3766 } while (0)
3767
3768/* minuend[hi:lo] -= subtrahend */
3769#define SUB_EXTEND_64(m_hi, m_lo, s) \
3770 do { \
3771 SUB_64(m_hi, 0, m_lo, s); \
3772 } while (0)
3773
3774#define SUB_EXTEND_USTAT(s, t) \
3775 do { \
3776 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3777 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003778 } while (0)
3779
3780/*
3781 * General service functions
3782 */
3783
3784static inline long bnx2x_hilo(u32 *hiref)
3785{
3786 u32 lo = *(hiref + 1);
3787#if (BITS_PER_LONG == 64)
3788 u32 hi = *hiref;
3789
3790 return HILO_U64(hi, lo);
3791#else
3792 return lo;
3793#endif
3794}
3795
3796/*
3797 * Init service functions
3798 */
3799
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003800static void bnx2x_storm_stats_post(struct bnx2x *bp)
3801{
3802 if (!bp->stats_pending) {
3803 struct eth_query_ramrod_data ramrod_data = {0};
Eilon Greensteinde832a52009-02-12 08:36:33 +00003804 int i, rc;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003805
3806 ramrod_data.drv_counter = bp->stats_counter++;
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08003807 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
Eilon Greensteinde832a52009-02-12 08:36:33 +00003808 for_each_queue(bp, i)
3809 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003810
3811 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3812 ((u32 *)&ramrod_data)[1],
3813 ((u32 *)&ramrod_data)[0], 0);
3814 if (rc == 0) {
3815 /* stats ramrod has it's own slot on the spq */
3816 bp->spq_left++;
3817 bp->stats_pending = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003818 }
3819 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003820}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003821
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003822static void bnx2x_hw_stats_post(struct bnx2x *bp)
3823{
3824 struct dmae_command *dmae = &bp->stats_dmae;
3825 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3826
3827 *stats_comp = DMAE_COMP_VAL;
Eilon Greensteinde832a52009-02-12 08:36:33 +00003828 if (CHIP_REV_IS_SLOW(bp))
3829 return;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003830
3831 /* loader */
3832 if (bp->executer_idx) {
3833 int loader_idx = PMF_DMAE_C(bp);
3834
3835 memset(dmae, 0, sizeof(struct dmae_command));
3836
3837 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3838 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3839 DMAE_CMD_DST_RESET |
3840#ifdef __BIG_ENDIAN
3841 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3842#else
3843 DMAE_CMD_ENDIANITY_DW_SWAP |
3844#endif
3845 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3846 DMAE_CMD_PORT_0) |
3847 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3848 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3849 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3850 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3851 sizeof(struct dmae_command) *
3852 (loader_idx + 1)) >> 2;
3853 dmae->dst_addr_hi = 0;
3854 dmae->len = sizeof(struct dmae_command) >> 2;
3855 if (CHIP_IS_E1(bp))
3856 dmae->len--;
3857 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3858 dmae->comp_addr_hi = 0;
3859 dmae->comp_val = 1;
3860
3861 *stats_comp = 0;
3862 bnx2x_post_dmae(bp, dmae, loader_idx);
3863
3864 } else if (bp->func_stx) {
3865 *stats_comp = 0;
3866 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3867 }
3868}
3869
3870static int bnx2x_stats_comp(struct bnx2x *bp)
3871{
3872 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3873 int cnt = 10;
3874
3875 might_sleep();
3876 while (*stats_comp != DMAE_COMP_VAL) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003877 if (!cnt) {
3878 BNX2X_ERR("timeout waiting for stats finished\n");
3879 break;
3880 }
3881 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -07003882 msleep(1);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003883 }
3884 return 1;
3885}
3886
3887/*
3888 * Statistics service functions
3889 */
3890
/* Synchronously read the shared port statistics block back from the chip
 * into host memory.  Run when this function becomes the Port Management
 * Function (PMF) on an E1H multi-function device, so it picks up the
 * totals accumulated by the previous PMF.  Uses two chained DMAE reads
 * because one command is limited to DMAE_LEN32_RD_MAX dwords.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* common GRC->PCI read opcode; per-command completion bits added below */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	/* first chunk: up to DMAE_LEN32_RD_MAX dwords, completes to GRC
	 * so the loader can chain to the second command */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second chunk: the remainder, completes to the host completion word */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3945
/* Build the PMF's periodic statistics DMAE chain:
 *   - write host port/function stats back to shared (MCP) memory,
 *   - read the active MAC's hardware counters (BMAC or EMAC) into
 *     host memory,
 *   - read the NIG counters.
 * The chain is only assembled here (into bp->slowpath->dmae[]); it is
 * executed later by bnx2x_hw_stats_post().  The final command completes
 * into the host stats_comp word.
 */
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	/* PCI->GRC write opcode: used to publish host stats to shared mem */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		/* host port stats -> shared memory at port_stx */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		/* host function stats -> shared memory at func_stx */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	/* GRC->PCI read opcode: used for MAC and NIG counter reads */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	/* skip the last 4 dwords (egress_mac_pkt0/1), read separately below */
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* last command in the chain: completes into the host stats_comp
	 * word (DMAE_CMD_C_DST_PCI) instead of chaining further */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
4153
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004154static void bnx2x_func_stats_init(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004155{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004156 struct dmae_command *dmae = &bp->stats_dmae;
4157 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004158
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004159 /* sanity */
4160 if (!bp->func_stx) {
4161 BNX2X_ERR("BUG!\n");
4162 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004163 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004164
4165 bp->executer_idx = 0;
4166 memset(dmae, 0, sizeof(struct dmae_command));
4167
4168 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4169 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4170 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4171#ifdef __BIG_ENDIAN
4172 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4173#else
4174 DMAE_CMD_ENDIANITY_DW_SWAP |
4175#endif
4176 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4177 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4178 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4179 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4180 dmae->dst_addr_lo = bp->func_stx >> 2;
4181 dmae->dst_addr_hi = 0;
4182 dmae->len = sizeof(struct host_func_stats) >> 2;
4183 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4184 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4185 dmae->comp_val = DMAE_COMP_VAL;
4186
4187 *stats_comp = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004188}
4189
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004190static void bnx2x_stats_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004191{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004192 if (bp->port.pmf)
4193 bnx2x_port_stats_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004194
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004195 else if (bp->func_stx)
4196 bnx2x_func_stats_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004197
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004198 bnx2x_hw_stats_post(bp);
4199 bnx2x_storm_stats_post(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004200}
4201
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004202static void bnx2x_stats_pmf_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004203{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004204 bnx2x_stats_comp(bp);
4205 bnx2x_stats_pmf_update(bp);
4206 bnx2x_stats_start(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004207}
4208
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004209static void bnx2x_stats_restart(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004210{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004211 bnx2x_stats_comp(bp);
4212 bnx2x_stats_start(bp);
4213}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004214
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004215static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4216{
4217 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4218 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004219 struct bnx2x_eth_stats *estats = &bp->eth_stats;
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00004220 struct {
4221 u32 lo;
4222 u32 hi;
4223 } diff;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004224
4225 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4226 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4227 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4228 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4229 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4230 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004231 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004232 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004233 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004234 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4235 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4236 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4237 UPDATE_STAT64(tx_stat_gt127,
4238 tx_stat_etherstatspkts65octetsto127octets);
4239 UPDATE_STAT64(tx_stat_gt255,
4240 tx_stat_etherstatspkts128octetsto255octets);
4241 UPDATE_STAT64(tx_stat_gt511,
4242 tx_stat_etherstatspkts256octetsto511octets);
4243 UPDATE_STAT64(tx_stat_gt1023,
4244 tx_stat_etherstatspkts512octetsto1023octets);
4245 UPDATE_STAT64(tx_stat_gt1518,
4246 tx_stat_etherstatspkts1024octetsto1522octets);
4247 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4248 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4249 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4250 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4251 UPDATE_STAT64(tx_stat_gterr,
4252 tx_stat_dot3statsinternalmactransmiterrors);
4253 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004254
4255 estats->pause_frames_received_hi =
4256 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4257 estats->pause_frames_received_lo =
4258 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4259
4260 estats->pause_frames_sent_hi =
4261 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4262 estats->pause_frames_sent_lo =
4263 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004264}
4265
/* Fold the freshly DMAE'd EMAC hardware counters into the accumulated
 * port statistics.  EMAC counters are 32-bit, so UPDATE_EXTEND_STAT
 * widens each one into the 64-bit mac_stx[1] slot.  Pause totals
 * (XON + XOFF) are then mirrored into eth_stats.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause received = XON received + XOFF received */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause sent = XON sent + XOFF sent */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
4322
/* Post-process a completed hardware statistics DMAE transfer: fold the
 * active MAC's counters, accumulate NIG deltas, and copy the results
 * into the driver's eth_stats view.  Returns 0 on success, -1 when no
 * MAC type is active (should not happen once the link is up).
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch {lo, hi} pair used by the UPDATE_STAT64_NIG macro */
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	/* accumulate NIG deltas since the previous snapshot */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	/* remember current NIG counters as the baseline for the next pass */
	memcpy(old, new, sizeof(struct nig_stats));

	/* publish the accumulated MAC block into eth_stats wholesale */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	/* matching start/end sequence numbers mark the snapshot consistent */
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	if (!BP_NOMCP(bp)) {
		/* track (and report) growth of the NIG timer maximum
		 * published by management firmware */
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}
4375
/* Fold the per-client statistics produced by the STORM firmware into the
 * driver's mirrors: per-queue qstats, per-function fstats and the global
 * eth_stats (estats).
 *
 * Returns 0 on success, or a negative value when a queue's firmware stats
 * counter does not match bp->stats_counter, i.e. the firmware has not yet
 * published a fresh snapshot for that queue.
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	/* Start from the base snapshot; the last two u32s are the
	 * host_func_stats_start/end markers and are excluded from the copy */
	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	/* these are re-accumulated from the per-queue values below */
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;	/* scratch used by the *_EXTEND_* macros below */

		/* are storm stats valid? each storm must have advanced its
		 * counter to match the driver's stats_counter */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		/* rx bytes: ucast + mcast + bcast, minus the no-buffer
		 * bytes the ustorm counted as dropped */
		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->bcast_no_buff_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->mcast_no_buff_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->ucast_no_buff_bytes.lo));

		/* valid bytes = total before error bytes are added back in */
		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		/* extend the 32-bit firmware packet counters into the
		 * 64-bit qstats, tracking wraparound via old_*client */
		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		/* packets dropped for lack of buffers are not "received" */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* tx bytes: ucast + mcast + bcast from the xstorm */
		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		/* remember raw discard counters for the next pass */
		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		/* accumulate this queue into the per-function totals */
		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		/* and into the global error/discard totals */
		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	/* add MAC-level bad octets (gathered by bnx2x_hw_stats_update) */
	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* publish the function totals into estats (markers excluded again) */
	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* port-wide discard counters are only valid on the PMF */
	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	/* mark the snapshot consistent (start == end) */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
4605
4606static void bnx2x_net_stats_update(struct bnx2x *bp)
4607{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004608 struct bnx2x_eth_stats *estats = &bp->eth_stats;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004609 struct net_device_stats *nstats = &bp->dev->stats;
Eilon Greensteinde832a52009-02-12 08:36:33 +00004610 int i;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004611
4612 nstats->rx_packets =
4613 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4614 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4615 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4616
4617 nstats->tx_packets =
4618 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4619 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4620 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4621
Eilon Greensteinde832a52009-02-12 08:36:33 +00004622 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004623
Eliezer Tamir0e39e642008-02-28 11:54:03 -08004624 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004625
Eilon Greensteinde832a52009-02-12 08:36:33 +00004626 nstats->rx_dropped = estats->mac_discard;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004627 for_each_queue(bp, i)
Eilon Greensteinde832a52009-02-12 08:36:33 +00004628 nstats->rx_dropped +=
4629 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4630
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004631 nstats->tx_dropped = 0;
4632
4633 nstats->multicast =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004634 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004635
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004636 nstats->collisions =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004637 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004638
4639 nstats->rx_length_errors =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004640 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4641 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4642 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4643 bnx2x_hilo(&estats->brb_truncate_hi);
4644 nstats->rx_crc_errors =
4645 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4646 nstats->rx_frame_errors =
4647 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4648 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004649 nstats->rx_missed_errors = estats->xxoverflow_discard;
4650
4651 nstats->rx_errors = nstats->rx_length_errors +
4652 nstats->rx_over_errors +
4653 nstats->rx_crc_errors +
4654 nstats->rx_frame_errors +
Eliezer Tamir0e39e642008-02-28 11:54:03 -08004655 nstats->rx_fifo_errors +
4656 nstats->rx_missed_errors;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004657
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004658 nstats->tx_aborted_errors =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004659 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4660 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4661 nstats->tx_carrier_errors =
4662 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004663 nstats->tx_fifo_errors = 0;
4664 nstats->tx_heartbeat_errors = 0;
4665 nstats->tx_window_errors = 0;
4666
4667 nstats->tx_errors = nstats->tx_aborted_errors +
Eilon Greensteinde832a52009-02-12 08:36:33 +00004668 nstats->tx_carrier_errors +
4669 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4670}
4671
4672static void bnx2x_drv_stats_update(struct bnx2x *bp)
4673{
4674 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4675 int i;
4676
4677 estats->driver_xoff = 0;
4678 estats->rx_err_discard_pkt = 0;
4679 estats->rx_skb_alloc_failed = 0;
4680 estats->hw_csum_err = 0;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004681 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +00004682 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4683
4684 estats->driver_xoff += qstats->driver_xoff;
4685 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4686 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4687 estats->hw_csum_err += qstats->hw_csum_err;
4688 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004689}
4690
/* Periodic statistics update: runs when the previous DMAE batch has
 * completed, refreshes HW/storm/netdev/driver stats and re-arms the next
 * statistics ramrod.  Panics if the storm firmware fails to deliver fresh
 * stats four polls in a row. */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* previous DMAE transfer not finished yet - try again next tick */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	/* MAC/NIG counters are only collected by the port master function */
	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* stats_pending counts consecutive polls with stale storm stats */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* optional per-queue debug dump, gated by the timer message level */
	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		int i;

		printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
		       bp->dev->name,
		       estats->brb_drop_lo, estats->brb_truncate_lo);

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;

			printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
					  " rx pkt(%lu) rx calls(%lu %lu)\n",
			       fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
			       fp->rx_comp_cons),
			       le16_to_cpu(*fp->rx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_received_hi),
			       fp->rx_calls, fp->rx_pkt);
		}

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
			struct netdev_queue *txq =
				netdev_get_tx_queue(bp->dev, i);

			printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
					  " tx pkt(%lu) tx calls (%lu)"
					  " %s (Xoff events %u)\n",
			       fp->name, bnx2x_tx_avail(fp),
			       le16_to_cpu(*fp->tx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_transmitted_hi),
			       fp->tx_pkt,
			       (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
			       qstats->driver_xoff);
		}
	}

	/* kick off the next collection cycle */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004754
/* Build the final DMAE transfer(s) that push the port statistics
 * (and, if present, the function statistics) out to shared memory for
 * management firmware before statistics collection stops.
 *
 * When both port and function stats exist, the port transfer's completion
 * triggers ("go") the function transfer via the DMAE loader register;
 * otherwise each transfer completes directly to stats_comp in host memory. */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* common opcode bits: PCI -> GRC copy for this port/vnic */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* completion target depends on whether a function-stats
		 * transfer follows this one */
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			/* chain: completion kicks the next DMAE channel */
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			/* standalone: complete into host stats_comp word */
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
4818
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004819static void bnx2x_stats_stop(struct bnx2x *bp)
4820{
4821 int update = 0;
4822
4823 bnx2x_stats_comp(bp);
4824
4825 if (bp->port.pmf)
4826 update = (bnx2x_hw_stats_update(bp) == 0);
4827
4828 update |= (bnx2x_storm_stats_update(bp) == 0);
4829
4830 if (update) {
4831 bnx2x_net_stats_update(bp);
4832
4833 if (bp->port.pmf)
4834 bnx2x_port_stats_stop(bp);
4835
4836 bnx2x_hw_stats_post(bp);
4837 bnx2x_stats_comp(bp);
4838 }
4839}
4840
/* No-op action for state/event pairs in bnx2x_stats_stm[] that require
 * no work. */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
4844
/* Statistics state machine, driven by bnx2x_stats_handle(): indexed by
 * [current state][event], each entry names the action to run and the
 * state to transition to. */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
4863
/* Feed an event into the statistics state machine: run the action for the
 * (current state, event) pair and move to the next state.  Does nothing
 * once the driver has paniced. */
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	if (unlikely(bp->panic))
		return;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	/* UPDATE events fire every timer tick - only log them when the
	 * timer message level is enabled */
	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
4881
/* PMF only: DMA the (zeroed) host port-statistics buffer out to the port
 * statistics area in shared memory, establishing the base snapshot.
 * Runs synchronously - posts the DMAE and waits for completion. */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* single PCI -> GRC transfer, completing into host stats_comp */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4919
/* PMF only: initialize the function-statistics base area in shared memory
 * for every vnic on this port, by temporarily retargeting bp->func_stx at
 * each function's stats mailbox and running a synchronous init cycle. */
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		/* functions alternate between the two ports */
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}
4948
/* Non-PMF path: DMA the current function statistics base from shared
 * memory (GRC) into the host func_stats_base buffer, so later updates in
 * bnx2x_storm_stats_update() accumulate on top of it.  Synchronous. */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* single GRC -> PCI transfer, completing into host stats_comp */
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4986
/* One-time statistics initialization: discover the shared-memory stats
 * mailboxes, snapshot the NIG baseline counters, zero all host-side stats
 * mirrors and set up the base areas (PMF) or pull the existing base
 * (non-PMF).  Leaves the state machine in STATS_STATE_DISABLED. */
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		/* no management firmware - nowhere to report stats */
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats: capture the NIG counters as the "old" baseline */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats: clear all per-queue mirrors */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}
5048
/* Periodic driver timer: in poll mode services the first queue's rings,
 * exchanges the driver/MCP heartbeat pulse, and kicks a statistics UPDATE
 * event while the device is open.  Always re-arms itself. */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	/* interrupts disabled - skip the work but keep the timer alive */
	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		/* advance and publish the driver heartbeat sequence */
		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5097
5098/* end of Statistics */
5099
5100/* nic init */
5101
5102/*
5103 * nic init service functions
5104 */
5105
/* Zero the USTORM and CSTORM halves of a non-default status block in the
 * CSEM fast memory (both halves live in the CSTORM area on this firmware). */
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}
5118
/* Program a non-default status block: tell the firmware the host DMA
 * address of each half (USTORM and CSTORM sections of @sb), bind the block
 * to this function, disable host coalescing on every index, and finally
 * ACK/enable the block in the IGU.
 *
 * @sb:      host virtual address of the status block
 * @mapping: its DMA (bus) address
 * @sb_id:   hardware status block id */
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	/* low/high dwords of the host address of the USTORM section */
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	/* disable (value 1) host coalescing for every USTORM index */
	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
5163
/* Zero this function's default status block areas in the STORM internal
 * (fast) memories: TSTORM, both CSTORM sections (the U and C halves of the
 * default SB both reside in CSEM memory) and XSTORM.
 */
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
5181
/* Initialize the default status block, which carries slow-path events
 * and hardware attentions for this function.
 *
 * Programs the attention section (AEU signature cache, HC attention
 * address/number registers), then publishes the DMA address of each
 * per-STORM sub-section (USTORM/CSTORM — both in CSTORM internal memory —
 * TSTORM and XSTORM), marks the owning function, and disables host
 * coalescing on every index.  Ends by ACKing the SB to enable interrupts.
 *
 * @bp:      driver instance
 * @def_sb:  default status block (host virtual address)
 * @mapping: DMA address of @def_sb
 * @sb_id:   hardware status block id
 */
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	/* cache the AEU enable bits of each dynamic attention group
	 * (4 consecutive registers, 0x10 apart per group) for the
	 * attention handler */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	/* point the HC at the attention section of this SB */
	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	/* coalescing disabled on all indices (see bnx2x_init_sb) */
	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* no statistics or MAC-set ramrods outstanding yet */
	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
5297
/* Program interrupt-coalescing timeouts for every queue's status block.
 *
 * For the Rx CQ and Tx CQ consumer indices, writes the timeout in units
 * of 4*BNX2X_BTR derived from bp->rx_ticks / bp->tx_ticks.  When the
 * computed timeout is 0 the corresponding HC_DISABLE flag is set to 1
 * (coalescing off for that index); otherwise it is cleared.
 */
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}
5327
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005328static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5329 struct bnx2x_fastpath *fp, int last)
5330{
5331 int i;
5332
5333 for (i = 0; i < last; i++) {
5334 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5335 struct sk_buff *skb = rx_buf->skb;
5336
5337 if (skb == NULL) {
5338 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5339 continue;
5340 }
5341
5342 if (fp->tpa_state[i] == BNX2X_TPA_START)
FUJITA Tomonori1a983142010-04-04 01:51:03 +00005343 dma_unmap_single(&bp->pdev->dev,
5344 dma_unmap_addr(rx_buf, mapping),
5345 bp->rx_buf_size, DMA_FROM_DEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005346
5347 dev_kfree_skb(skb);
5348 rx_buf->skb = NULL;
5349 }
5350}
5351
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005352static void bnx2x_init_rx_rings(struct bnx2x *bp)
5353{
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005354 int func = BP_FUNC(bp);
Eilon Greenstein32626232008-08-13 15:51:07 -07005355 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5356 ETH_MAX_AGGREGATION_QUEUES_E1H;
5357 u16 ring_prod, cqe_ring_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005358 int i, j;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005359
Eilon Greenstein87942b42009-02-12 08:36:49 +00005360 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
Eilon Greenstein0f008462009-02-12 08:36:18 +00005361 DP(NETIF_MSG_IFUP,
5362 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005363
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005364 if (bp->flags & TPA_ENABLE_FLAG) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005365
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005366 for_each_queue(bp, j) {
Eilon Greenstein32626232008-08-13 15:51:07 -07005367 struct bnx2x_fastpath *fp = &bp->fp[j];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005368
Eilon Greenstein32626232008-08-13 15:51:07 -07005369 for (i = 0; i < max_agg_queues; i++) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005370 fp->tpa_pool[i].skb =
5371 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5372 if (!fp->tpa_pool[i].skb) {
5373 BNX2X_ERR("Failed to allocate TPA "
5374 "skb pool for queue[%d] - "
5375 "disabling TPA on this "
5376 "queue!\n", j);
5377 bnx2x_free_tpa_pool(bp, fp, i);
5378 fp->disable_tpa = 1;
5379 break;
5380 }
FUJITA Tomonori1a983142010-04-04 01:51:03 +00005381 dma_unmap_addr_set((struct sw_rx_bd *)
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005382 &bp->fp->tpa_pool[i],
5383 mapping, 0);
5384 fp->tpa_state[i] = BNX2X_TPA_STOP;
5385 }
5386 }
5387 }
5388
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005389 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005390 struct bnx2x_fastpath *fp = &bp->fp[j];
5391
5392 fp->rx_bd_cons = 0;
5393 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005394 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005395
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005396 /* "next page" elements initialization */
5397 /* SGE ring */
5398 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5399 struct eth_rx_sge *sge;
5400
5401 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5402 sge->addr_hi =
5403 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5404 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5405 sge->addr_lo =
5406 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5407 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5408 }
5409
5410 bnx2x_init_sge_ring_bit_mask(fp);
5411
5412 /* RX BD ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005413 for (i = 1; i <= NUM_RX_RINGS; i++) {
5414 struct eth_rx_bd *rx_bd;
5415
5416 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5417 rx_bd->addr_hi =
5418 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005419 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005420 rx_bd->addr_lo =
5421 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005422 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005423 }
5424
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005425 /* CQ ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005426 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5427 struct eth_rx_cqe_next_page *nextpg;
5428
5429 nextpg = (struct eth_rx_cqe_next_page *)
5430 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5431 nextpg->addr_hi =
5432 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005433 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005434 nextpg->addr_lo =
5435 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005436 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005437 }
5438
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005439 /* Allocate SGEs and initialize the ring elements */
5440 for (i = 0, ring_prod = 0;
5441 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005442
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005443 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5444 BNX2X_ERR("was only able to allocate "
5445 "%d rx sges\n", i);
5446 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5447 /* Cleanup already allocated elements */
5448 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
Eilon Greenstein32626232008-08-13 15:51:07 -07005449 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005450 fp->disable_tpa = 1;
5451 ring_prod = 0;
5452 break;
5453 }
5454 ring_prod = NEXT_SGE_IDX(ring_prod);
5455 }
5456 fp->rx_sge_prod = ring_prod;
5457
5458 /* Allocate BDs and initialize BD ring */
Yitchak Gertner66e855f2008-08-13 15:49:05 -07005459 fp->rx_comp_cons = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005460 cqe_ring_prod = ring_prod = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005461 for (i = 0; i < bp->rx_ring_size; i++) {
5462 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5463 BNX2X_ERR("was only able to allocate "
Eilon Greensteinde832a52009-02-12 08:36:33 +00005464 "%d rx skbs on queue[%d]\n", i, j);
5465 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005466 break;
5467 }
5468 ring_prod = NEXT_RX_IDX(ring_prod);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005469 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
Ilpo Järvinen53e5e962008-07-25 21:40:45 -07005470 WARN_ON(ring_prod <= i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005471 }
5472
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005473 fp->rx_bd_prod = ring_prod;
5474 /* must not have more available CQEs than BDs */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005475 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5476 cqe_ring_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005477 fp->rx_pkt = fp->rx_calls = 0;
5478
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005479 /* Warning!
5480 * this will generate an interrupt (to the TSTORM)
5481 * must only be done after chip is initialized
5482 */
5483 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5484 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005485 if (j != 0)
5486 continue;
5487
5488 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005489 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005490 U64_LO(fp->rx_comp_mapping));
5491 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005492 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005493 U64_HI(fp->rx_comp_mapping));
5494 }
5495}
5496
5497static void bnx2x_init_tx_ring(struct bnx2x *bp)
5498{
5499 int i, j;
5500
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005501 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005502 struct bnx2x_fastpath *fp = &bp->fp[j];
5503
5504 for (i = 1; i <= NUM_TX_RINGS; i++) {
Eilon Greensteinca003922009-08-12 22:53:28 -07005505 struct eth_tx_next_bd *tx_next_bd =
5506 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005507
Eilon Greensteinca003922009-08-12 22:53:28 -07005508 tx_next_bd->addr_hi =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005509 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005510 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eilon Greensteinca003922009-08-12 22:53:28 -07005511 tx_next_bd->addr_lo =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005512 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005513 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005514 }
5515
Eilon Greensteinca003922009-08-12 22:53:28 -07005516 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5517 fp->tx_db.data.zero_fill1 = 0;
5518 fp->tx_db.data.prod = 0;
5519
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005520 fp->tx_pkt_prod = 0;
5521 fp->tx_pkt_cons = 0;
5522 fp->tx_bd_prod = 0;
5523 fp->tx_bd_cons = 0;
5524 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5525 fp->tx_pkt = 0;
5526 }
5527}
5528
/* Initialize the slow-path (SP) queue: reset the software producer state
 * and credit count, and publish the SPQ page base address and initial
 * producer index to the XSTORM for this function.
 */
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	/* full credit of pending slow-path entries available */
	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	/* 64-bit page base: low dword, then high dword at +4 */
	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
5550
/* Fill in the firmware ETH connection context of every queue.
 *
 * First pass programs the Rx side (USTORM section: client id, status
 * block, buffer sizes, BD/SGE page bases, and TPA parameters when TPA is
 * enabled for the queue) plus the CDU usage/reserved values.  Second pass
 * programs the Tx side (CSTORM index/SB and XSTORM BD page base and
 * statistics id).
 */
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			/* SGE buffer size is a u16 field; clamp to 0xffff */
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
					   0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			/* max SGEs per packet: MTU in SGE pages, rounded up
			 * to a whole PAGES_PER_SGE multiple */
			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}
5626
/* Program the per-function RSS indirection table in TSTORM internal
 * memory: each of the TSTORM_INDIRECTION_TABLE_SIZE byte entries gets a
 * client id, distributed round-robin over the active queues.  A no-op
 * when RSS is disabled.
 */
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}
5642
/* Write the Tstorm per-client configuration (MTU, statistics, VLAN/E1HOV
 * stripping flags) for every queue's client id.  The config struct is
 * written to internal memory as two raw u32 words.
 */
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	/* NOTE: "STATSITICS" typo is in the firmware-generated macro name */
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	/* HW VLAN stripping only when Rx is active and a vlan group exists */
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		/* write the struct as two consecutive u32 words */
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
5675
/* Apply bp->rx_mode to the hardware: builds the Tstorm MAC filter config
 * (drop-all / accept-all bits per packet class, masked by
 * bp->rx_mode_cl_mask), programs the NIG LLH BRB driver mask, and — for
 * any mode other than NONE — refreshes the per-client config via
 * bnx2x_set_client_config().
 */
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	/* write the filter config struct to internal memory word by word */
	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
5738
Eilon Greenstein471de712008-08-13 15:49:35 -07005739static void bnx2x_init_internal_common(struct bnx2x *bp)
5740{
5741 int i;
5742
5743 /* Zero this manually as its initialization is
5744 currently missing in the initTool */
5745 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5746 REG_WR(bp, BAR_USTRORM_INTMEM +
5747 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5748}
5749
/* Per-port internal memory init: program the HC "bit timer resolution"
 * (BNX2X_BTR) for this port in the C, T and X STORMs (two entries in the
 * CSTORM - one for the U-path, one for the C-path status blocks).
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
5761
/* Per-function internal (STORM) memory init: RSS/TPA configuration,
 * per-client statistics reset, statistics collection context and DMA
 * address, CQ ring page mappings per queue, dropless flow control
 * thresholds (E1H) and rate-shaping/fairness (cmng) contexts.
 * Called for every load code (COMMON, PORT and FUNCTION).
 */
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	/* RSS hash configuration for this function */
	tstorm_config.config_flags = RSS_FLAGS(bp);

	if (is_multi(bp))
		tstorm_config.rss_result_mask = MULTI_MASK;

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	/* In E1H multi-function mode the outer VLAN (E1HOV) tag lives in
	 * the CAM for client classification */
	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	/* Zero the per-client statistics of every queue in all four STORMs
	 * that keep them (X, T and U) */
	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	/* stats_flags is a 64-bit structure written as two 32-bit words
	 * into each STORM */
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	/* Tell each STORM the host DMA address (lo/hi 32 bits) where the FW
	 * statistics query results should be written */
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	/* E1H only: publish the single/multi function mode to all STORMs,
	 * and the outer-VLAN tag of this function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
				   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* first CQE page (lo/hi DMA address) */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		/* BD/CQE/SGE low/high water marks for generating pause */
		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			/* SGE thresholds only matter when TPA is active */
			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}


			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}


	/* Store cmng structures to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
5974
Eilon Greenstein471de712008-08-13 15:49:35 -07005975static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5976{
5977 switch (load_code) {
5978 case FW_MSG_CODE_DRV_LOAD_COMMON:
5979 bnx2x_init_internal_common(bp);
5980 /* no break */
5981
5982 case FW_MSG_CODE_DRV_LOAD_PORT:
5983 bnx2x_init_internal_port(bp);
5984 /* no break */
5985
5986 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5987 bnx2x_init_internal_func(bp);
5988 break;
5989
5990 default:
5991 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5992 break;
5993 }
5994}
5995
/* Top-level NIC init after firmware load: set up each fastpath queue's
 * identity and status block, the default status block, rings, context,
 * internal STORM memory (per load_code) and statistics, then enable
 * interrupts.  The ordering here matters: interrupts are only enabled
 * after everything else is ready and flushed.
 */
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		/* with CNIC, sb_id 0 (cl_id) is reserved for iSCSI */
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
	    REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
6050
6051/* end of nic init */
6052
6053/*
6054 * gzip service functions
6055 */
6056
6057static int bnx2x_gunzip_init(struct bnx2x *bp)
6058{
FUJITA Tomonori1a983142010-04-04 01:51:03 +00006059 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6060 &bp->gunzip_mapping, GFP_KERNEL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006061 if (bp->gunzip_buf == NULL)
6062 goto gunzip_nomem1;
6063
6064 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6065 if (bp->strm == NULL)
6066 goto gunzip_nomem2;
6067
6068 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6069 GFP_KERNEL);
6070 if (bp->strm->workspace == NULL)
6071 goto gunzip_nomem3;
6072
6073 return 0;
6074
6075gunzip_nomem3:
6076 kfree(bp->strm);
6077 bp->strm = NULL;
6078
6079gunzip_nomem2:
FUJITA Tomonori1a983142010-04-04 01:51:03 +00006080 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6081 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006082 bp->gunzip_buf = NULL;
6083
6084gunzip_nomem1:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006085 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6086 " un-compression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006087 return -ENOMEM;
6088}
6089
6090static void bnx2x_gunzip_end(struct bnx2x *bp)
6091{
6092 kfree(bp->strm->workspace);
6093
6094 kfree(bp->strm);
6095 bp->strm = NULL;
6096
6097 if (bp->gunzip_buf) {
FUJITA Tomonori1a983142010-04-04 01:51:03 +00006098 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6099 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006100 bp->gunzip_buf = NULL;
6101 }
6102}
6103
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006104static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006105{
6106 int n, rc;
6107
6108 /* check gzip header */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006109 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6110 BNX2X_ERR("Bad gzip header\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006111 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006112 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006113
6114 n = 10;
6115
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006116#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006117
6118 if (zbuf[3] & FNAME)
6119 while ((zbuf[n++] != 0) && (n < len));
6120
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006121 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006122 bp->strm->avail_in = len - n;
6123 bp->strm->next_out = bp->gunzip_buf;
6124 bp->strm->avail_out = FW_BUF_SIZE;
6125
6126 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6127 if (rc != Z_OK)
6128 return rc;
6129
6130 rc = zlib_inflate(bp->strm, Z_FINISH);
6131 if ((rc != Z_OK) && (rc != Z_STREAM_END))
Joe Perches7995c642010-02-17 15:01:52 +00006132 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6133 bp->strm->msg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006134
6135 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6136 if (bp->gunzip_outlen & 0x3)
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006137 netdev_err(bp->dev, "Firmware decompression error:"
6138 " gunzip_outlen (%d) not aligned\n",
6139 bp->gunzip_outlen);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006140 bp->gunzip_outlen >>= 2;
6141
6142 zlib_inflateEnd(bp->strm);
6143
6144 if (rc == Z_STREAM_END)
6145 return 0;
6146
6147 return rc;
6148}
6149
6150/* nic load/unload */
6151
6152/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006153 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006154 */
6155
6156/* send a NIG loopback debug packet */
6157static void bnx2x_lb_pckt(struct bnx2x *bp)
6158{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006159 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006160
6161 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006162 wb_write[0] = 0x55555555;
6163 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006164 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006165 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006166
6167 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006168 wb_write[0] = 0x09000000;
6169 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006170 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006171 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006172}
6173
6174/* some of the internal memories
6175 * are not directly readable from the driver
6176 * to test them we send debug packets
6177 */
/* Self-test of internal memories that are not directly readable: inject
 * loopback debug packets and verify the NIG/PRS counters advance as
 * expected.  Returns 0 on success, a negative step-specific code on the
 * first failed check.  Timeouts are scaled by a factor for FPGA/emulation
 * platforms, which run much slower than silicon.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
6325
/* Program the interrupt (attention) mask registers of the HW blocks.
 * Most blocks get 0; PBF keeps bits 3,4 masked and PXP2's value depends
 * on FPGA vs ASIC.  The commented-out writes (SEM/MISC blocks) are
 * intentionally left masked.  NOTE(review): the mask polarity (0 =
 * unmask?) follows from the register definitions, which are not visible
 * here -- confirm against the register spec before changing any value.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
6364
/* Table of per-block parity mask registers and the value to program into
 * each (consumed by enable_blocks_parity()).  NOTE(review): per the
 * inline comments, non-zero values appear to keep specific bits masked
 * (e.g. SRC bit 2, PRS bit 6) -- confirm polarity against the register
 * spec before editing.
 */
static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4},	/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};
6398
6399static void enable_blocks_parity(struct bnx2x *bp)
6400{
6401 int i, mask_arr_len =
6402 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
6403
6404 for (i = 0; i < mask_arr_len; i++)
6405 REG_WR(bp, bnx2x_parity_mask[i].addr,
6406 bnx2x_parity_mask[i].mask);
6407}
6408
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006409
/* Assert reset on the common HW blocks by clearing the two MISC reset
 * registers (the magic values select which blocks are affected; the
 * complementary _SET writes are done in bnx2x_init_common()).
 */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
6417
Eilon Greenstein573f2032009-08-12 08:24:14 +00006418static void bnx2x_init_pxp(struct bnx2x *bp)
6419{
6420 u16 devctl;
6421 int r_order, w_order;
6422
6423 pci_read_config_word(bp->pdev,
6424 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6425 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6426 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6427 if (bp->mrrs == -1)
6428 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6429 else {
6430 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6431 r_order = bp->mrrs;
6432 }
6433
6434 bnx2x_init_pxp_arb(bp, r_order, w_order);
6435}
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00006436
/* Decide from shared-memory configuration whether fan failure detection
 * is needed (explicitly enabled, or implied by the external PHY type of
 * either port) and, if so, wire SPIO 5 as an active-low fan failure
 * input that raises an interrupt towards the IGU.  No-op without an MCP.
 */
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	/* without management firmware there is no shared memory to read */
	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
6493
/*
 * bnx2x_init_common() - one-time, chip-wide hardware initialization.
 * Runs only on the function that received FW_MSG_CODE_DRV_LOAD_COMMON from
 * the MCP (see bnx2x_init_hw()): resets the common blocks, brings up
 * PXP/PXP2, DMAE, the storm memories, QM, DQ, BRB1/PRS, SDM/SEM blocks,
 * the searcher (SRC), CDU, CFC, HC, AEU and NIG, runs the internal memory
 * self test on first power-up, configures fan-failure detection and
 * initializes the common PHY.
 *
 * Returns 0 on success, or -EBUSY when a hardware "init done" poll fails
 * (PXP2 CFG/RD_INIT, CFC LL/AC/CAM init, or the internal memory self test).
 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006494static int bnx2x_init_common(struct bnx2x *bp)
6495{
6496	u32 val, i;
Michael Chan37b091b2009-10-10 13:46:55 +00006497#ifdef BCM_CNIC
6498	u32 wb_write[2];
6499#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006500
6501	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6502
	/* put the common blocks into a known state before programming them */
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00006503	bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006504	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6505	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6506
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006507	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006508	if (CHIP_IS_E1H(bp))
6509		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6510
6511	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6512	msleep(30);
6513	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6514
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006515	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006516	if (CHIP_IS_E1(bp)) {
6517		/* enable HW interrupt from PXP on USDM overflow
6518		   bit 16 on INT_MASK_0 */
6519		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006520	}
6521
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006522	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006523	bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006524
6525#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006526	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6527	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6528	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6529	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6530	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00006531	/* make sure this value is 0 */
6532	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006533
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006534/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6535	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6536	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6537	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6538	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006539#endif
6540
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006541	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
Michael Chan37b091b2009-10-10 13:46:55 +00006542#ifdef BCM_CNIC
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006543	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6544	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6545	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006546#endif
6547
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006548	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6549		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006550
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006551	/* let the HW do it's magic ... */
6552	msleep(100);
6553	/* finish PXP init */
6554	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6555	if (val != 1) {
6556		BNX2X_ERR("PXP2 CFG failed\n");
6557		return -EBUSY;
6558	}
6559	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6560	if (val != 1) {
6561		BNX2X_ERR("PXP2 RD_INIT failed\n");
6562		return -EBUSY;
6563	}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006564
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006565	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6566	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006567
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006568	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006569
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006570	/* clean the DMAE memory */
6571	bp->dmae_ready = 1;
6572	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006573
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006574	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6575	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6576	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6577	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006578
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006579	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6580	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6581	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6582	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6583
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006584	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00006585
6586#ifdef BCM_CNIC
	/* zero the QM pointer table and program per-connection base addresses
	   (extended table only exists on E1H) */
6587	wb_write[0] = 0;
6588	wb_write[1] = 0;
6589	for (i = 0; i < 64; i++) {
6590		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6591		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6592
6593		if (CHIP_IS_E1H(bp)) {
6594			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6595			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6596					  wb_write, 2);
6597		}
6598	}
6599#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006600	/* soft reset pulse */
6601	REG_WR(bp, QM_REG_SOFT_RESET, 1);
6602	REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006603
Michael Chan37b091b2009-10-10 13:46:55 +00006604#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006605	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006606#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006607
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006608	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006609	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6610	if (!CHIP_REV_IS_SLOW(bp)) {
6611		/* enable hw interrupt from doorbell Q */
6612		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6613	}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006614
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006615	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6616	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08006617	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00006618#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07006619	/* set NIC mode */
6620	REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00006621#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006622	if (CHIP_IS_E1H(bp))
6623		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006624
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006625	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6626	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6627	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6628	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006629
	/* zero each storm's internal fast memory before loading its code */
Eilon Greensteinca003922009-08-12 22:53:28 -07006630	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6631	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6632	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6633	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006634
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006635	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6636	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6637	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6638	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006639
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006640	/* sync semi rtc */
6641	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6642	       0x80000000);
6643	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6644	       0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006645
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006646	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6647	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6648	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006649
	/* seed the searcher RSS key registers with random data while the
	   block is held in soft reset */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006650	REG_WR(bp, SRC_REG_SOFT_RST, 1);
Tom Herbertc68ed252010-04-23 00:10:52 -07006651	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
6652		REG_WR(bp, i, random32());
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006653	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00006654#ifdef BCM_CNIC
6655	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6656	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6657	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6658	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6659	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6660	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6661	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6662	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6663	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6664	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6665#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006666	REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006667
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006668	if (sizeof(union cdu_context) != 1024)
6669		/* we currently assume that a context is 1024 bytes */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006670		dev_alert(&bp->pdev->dev, "please adjust the size "
6671					  "of cdu_context(%ld)\n",
Joe Perches7995c642010-02-17 15:01:52 +00006672			 (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006673
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006674	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006675	val = (4 << 24) + (0 << 12) + 1024;
6676	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006677
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006678	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006679	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08006680	/* enable context validation interrupt from CFC */
6681	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6682
6683	/* set the thresholds to prevent CFC/CDU race */
6684	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006685
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006686	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6687	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006688
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006689	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006690	/* Reset PCIE errors for debug */
6691	REG_WR(bp, 0x2814, 0xffffffff);
6692	REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006693
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006694	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006695	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006696	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006697	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006698
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006699	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006700	if (CHIP_IS_E1H(bp)) {
6701		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6702		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6703	}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006704
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006705	if (CHIP_REV_IS_SLOW(bp))
6706		msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006707
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006708	/* finish CFC init */
6709	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6710	if (val != 1) {
6711		BNX2X_ERR("CFC LL_INIT failed\n");
6712		return -EBUSY;
6713	}
6714	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6715	if (val != 1) {
6716		BNX2X_ERR("CFC AC_INIT failed\n");
6717		return -EBUSY;
6718	}
6719	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6720	if (val != 1) {
6721		BNX2X_ERR("CFC CAM_INIT failed\n");
6722		return -EBUSY;
6723	}
6724	REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006725
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006726	/* read NIG statistic
6727	   to see if this is our first up since powerup */
6728	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6729	val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006730
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006731	/* do internal memory self test */
6732	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6733		BNX2X_ERR("internal mem self test failed\n");
6734		return -EBUSY;
6735	}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006736
	/* these external PHYs require the HW lock to be taken around MDC/MDIO
	   accesses (flag consumed elsewhere via bp->port.need_hw_lock) */
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006737	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
Eilon Greenstein46c6a672009-02-12 08:36:58 +00006738	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6739	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6740	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006741	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eilon Greenstein46c6a672009-02-12 08:36:58 +00006742		bp->port.need_hw_lock = 1;
6743		break;
6744
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006745	default:
6746		break;
6747	}
Eliezer Tamirf1410642008-02-28 11:51:50 -08006748
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00006749	bnx2x_setup_fan_failure_detection(bp);
6750
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006751	/* clear PXP2 attentions */
6752	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006753
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006754	enable_blocks_attention(bp);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006755	if (CHIP_PARITY_SUPPORTED(bp))
6756		enable_blocks_parity(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006757
	/* common PHY init needs the bootcode (MCP); without it the link
	   cannot be brought up */
Yaniv Rosner6bbca912008-08-13 15:57:28 -07006758	if (!BP_NOMCP(bp)) {
6759		bnx2x_acquire_phy_lock(bp);
6760		bnx2x_common_init_phy(bp, bp->common.shmem_base);
6761		bnx2x_release_phy_lock(bp);
6762	} else
6763		BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6764
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006765	return 0;
6766}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006767
/*
 * bnx2x_init_port() - per-port hardware initialization.
 * Runs on the function that got FW_MSG_CODE_DRV_LOAD_PORT (or COMMON, which
 * falls through to it in bnx2x_init_hw()): initializes the port-stage pass
 * of each HW block, programs the BRB pause thresholds from the MTU/port
 * configuration, configures PBF, HC, the AEU attention masks and NIG, adds
 * PHY-specific GPIO3/SPIO5 attention sources, and finally resets the link.
 * Always returns 0.
 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006768static int bnx2x_init_port(struct bnx2x *bp)
6769{
6770	int port = BP_PORT(bp);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006771	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
Eilon Greenstein1c063282009-02-12 08:36:43 +00006772	u32 low, high;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006773	u32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006774
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006775	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006776
6777	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006778
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006779	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006780	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07006781
6782	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6783	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6784	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006785	bnx2x_init_block(bp, XCM_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006786
Michael Chan37b091b2009-10-10 13:46:55 +00006787#ifdef BCM_CNIC
6788	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006789
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006790	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
Michael Chan37b091b2009-10-10 13:46:55 +00006791	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6792	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006793#endif
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006794
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006795	bnx2x_init_block(bp, DQ_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00006796
	/* BRB pause thresholds (in 256-byte units) depend on chip rev,
	   multi-function mode, single/dual-port mode and MTU */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006797	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00006798	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6799		/* no pause for emulation and FPGA */
6800		low = 0;
6801		high = 513;
6802	} else {
6803		if (IS_E1HMF(bp))
6804			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6805		else if (bp->dev->mtu > 4096) {
6806			if (bp->flags & ONE_PORT_FLAG)
6807				low = 160;
6808			else {
6809				val = bp->dev->mtu;
6810				/* (24*1024 + val*4)/256 */
6811				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6812			}
6813		} else
6814			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6815		high = low + 56;	/* 14*1024/256 */
6816	}
6817	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6818	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6819
6820
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006821	bnx2x_init_block(bp, PRS_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07006822
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006823	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006824	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006825	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006826	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006827
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006828	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6829	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6830	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6831	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006832
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006833	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006834	bnx2x_init_block(bp, XPB_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006835
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006836	bnx2x_init_block(bp, PBF_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006837
6838	/* configure PBF to work without PAUSE mtu 9000 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006839	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006840
6841	/* update threshold */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006842	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006843	/* update init credit */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006844	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006845
6846	/* probe changes */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006847	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006848	msleep(5);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006849	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006850
Michael Chan37b091b2009-10-10 13:46:55 +00006851#ifdef BCM_CNIC
6852	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006853#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006854	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006855	bnx2x_init_block(bp, CFC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006856
6857	if (CHIP_IS_E1(bp)) {
6858		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6859		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6860	}
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006861	bnx2x_init_block(bp, HC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006862
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006863	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006864	/* init aeu_mask_attn_func_0/1:
6865	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6866	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6867	 *             bits 4-7 are used for "per vn group attention" */
6868	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6869	       (IS_E1HMF(bp) ? 0xF7 : 0x7));
6870
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006871	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006872	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006873	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006874	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006875	bnx2x_init_block(bp, DBG_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006876
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006877	bnx2x_init_block(bp, NIG_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006878
6879	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6880
6881	if (CHIP_IS_E1H(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006882		/* 0x2 disable e1hov, 0x1 enable */
6883		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6884		       (IS_E1HMF(bp) ? 0x1 : 0x2));
6885
Eilon Greenstein1c063282009-02-12 08:36:43 +00006886		{
6887			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6888			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6889			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6890		}
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006891	}
6892
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006893	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006894	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006895
	/* route PHY-specific attention sources (GPIO3 / SPIO5) into the
	   AEU group of the function that owns this port */
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006896	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
Eilon Greenstein589abe32009-02-12 08:36:55 +00006897	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6898		{
6899		u32 swap_val, swap_override, aeu_gpio_mask, offset;
6900
6901		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6902			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6903
6904		/* The GPIO should be swapped if the swap register is
6905		   set and active */
6906		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6907		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6908
6909		/* Select function upon port-swap configuration */
6910		if (port == 0) {
6911			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6912			aeu_gpio_mask = (swap_val && swap_override) ?
6913				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6914				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6915		} else {
6916			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6917			aeu_gpio_mask = (swap_val && swap_override) ?
6918				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6919				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6920		}
6921		val = REG_RD(bp, offset);
6922		/* add GPIO3 to group */
6923		val |= aeu_gpio_mask;
6924		REG_WR(bp, offset, val);
6925		}
6926		break;
6927
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006928	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006929	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eliezer Tamirf1410642008-02-28 11:51:50 -08006930		/* add SPIO 5 to group 0 */
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006931		{
6932		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6933				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6934		val = REG_RD(bp, reg_addr);
Eliezer Tamirf1410642008-02-28 11:51:50 -08006935		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006936		REG_WR(bp, reg_addr, val);
6937		}
Eliezer Tamirf1410642008-02-28 11:51:50 -08006938		break;
6939
6940	default:
6941		break;
6942	}
6943
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006944	bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006945
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006946	return 0;
6947}
6948
/*
 * ILT (on-chip address translation table) layout helpers: each function
 * owns ILT_PER_FUNC consecutive lines starting at FUNC_ILT_BASE(func).
 * When CNIC support is compiled in, extra lines are reserved per function
 * for the CNIC timers/QM/searcher contexts (CNIC_ILT_LINES); otherwise
 * none are.  Used by bnx2x_ilt_wr()/bnx2x_init_func() below.
 */
6949#define ILT_PER_FUNC	(768/2)
6950#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
6951/* the phys address is shifted right 12 bits and has an added
6952   1=valid bit added to the 53rd bit
6953   then since this is a wide register(TM)
6954   we split it into two 32 bit writes
6955 */
6956#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6957#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
6958#define PXP_ONE_ILT(x)		(((x) << 10) | x)
6959#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
6960
Michael Chan37b091b2009-10-10 13:46:55 +00006961#ifdef BCM_CNIC
6962#define CNIC_ILT_LINES		127
6963#define CNIC_CTX_PER_ILT	16
6964#else
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006965#define CNIC_ILT_LINES		0
Michael Chan37b091b2009-10-10 13:46:55 +00006966#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006967
6968static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6969{
6970 int reg;
6971
6972 if (CHIP_IS_E1H(bp))
6973 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6974 else /* E1 */
6975 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6976
6977 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6978}
6979
/*
 * bnx2x_init_func() - per-PCI-function hardware initialization.
 * Runs for every load_code value (see bnx2x_init_hw()): enables MSI
 * reconfiguration in the HC, maps this function's ILT lines (context
 * memory, plus CNIC timers/QM/searcher tables when BCM_CNIC), runs the
 * function-stage pass of the blocks on E1H (including per-function NIG
 * LLH enable and e1hov VLAN id), initializes the HC and clears the PCIE
 * error status registers.  Always returns 0.
 */
6980static int bnx2x_init_func(struct bnx2x *bp)
6981{
6982	int port = BP_PORT(bp);
6983	int func = BP_FUNC(bp);
Eilon Greenstein8badd272009-02-12 08:36:15 +00006984	u32 addr, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006985	int i;
6986
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00006987	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006988
Eilon Greenstein8badd272009-02-12 08:36:15 +00006989	/* set MSI reconfigure capability */
6990	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6991	val = REG_RD(bp, addr);
6992	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6993	REG_WR(bp, addr, val);
6994
	/* map this function's slowpath context into its first ILT line */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006995	i = FUNC_ILT_BASE(func);
6996
6997	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6998	if (CHIP_IS_E1H(bp)) {
6999		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
7000		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
7001	} else /* E1 */
7002		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
7003		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
7004
Michael Chan37b091b2009-10-10 13:46:55 +00007005#ifdef BCM_CNIC
	/* consecutive ILT lines for the CNIC timers, QM and searcher (T1)
	   tables; register layout differs between E1 and E1H */
7006	i += 1 + CNIC_ILT_LINES;
7007	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
7008	if (CHIP_IS_E1(bp))
7009		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
7010	else {
7011		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
7012		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
7013	}
7014
7015	i++;
7016	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
7017	if (CHIP_IS_E1(bp))
7018		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7019	else {
7020		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7021		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7022	}
7023
7024	i++;
7025	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7026	if (CHIP_IS_E1(bp))
7027		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7028	else {
7029		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7030		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7031	}
7032
7033	/* tell the searcher where the T2 table is */
7034	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7035
7036	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7037		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7038
7039	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7040		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7041		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7042
7043	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7044#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007045
7046	if (CHIP_IS_E1H(bp)) {
Eilon Greenstein573f2032009-08-12 08:24:14 +00007047		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7048		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7049		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7050		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7051		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7052		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7053		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7054		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7055		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007056
7057		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7058		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7059	}
7060
7061	/* HC init per function */
7062	if (CHIP_IS_E1H(bp)) {
7063		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7064
7065		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7066		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7067	}
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007068	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007069
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007070	/* Reset PCIE errors for debug */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007071	REG_WR(bp, 0x2114, 0xffffffff);
7072	REG_WR(bp, 0x2120, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007073
7074	return 0;
7075}
7076
/*
 * bnx2x_init_hw() - top-level hardware init dispatcher.
 * @load_code: the FW_MSG_CODE_DRV_LOAD_* answer received from the MCP,
 * which tells this function how much initialization it is responsible
 * for.  The switch cases deliberately fall through (marked "no break"):
 * COMMON does common + port + function init, PORT does port + function,
 * FUNCTION does function init only.  Also updates the driver-pulse
 * sequence from shmem when the bootcode is present and zeroes the status
 * blocks before releasing the gunzip buffer.
 *
 * Returns 0 on success or the negative error from the first failing
 * init stage (the gunzip buffer is released on the error path too).
 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007077static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7078{
7079	int i, rc = 0;
7080
7081	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
7082	   BP_FUNC(bp), load_code);
7083
7084	bp->dmae_ready = 0;
7085	mutex_init(&bp->dmae_mutex);
Eilon Greenstein54016b22009-08-12 08:23:48 +00007086	rc = bnx2x_gunzip_init(bp);
7087	if (rc)
7088		return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007089
7090	switch (load_code) {
7091	case FW_MSG_CODE_DRV_LOAD_COMMON:
7092		rc = bnx2x_init_common(bp);
7093		if (rc)
7094			goto init_hw_err;
7095		/* no break */
7096
7097	case FW_MSG_CODE_DRV_LOAD_PORT:
7098		bp->dmae_ready = 1;
7099		rc = bnx2x_init_port(bp);
7100		if (rc)
7101			goto init_hw_err;
7102		/* no break */
7103
7104	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7105		bp->dmae_ready = 1;
7106		rc = bnx2x_init_func(bp);
7107		if (rc)
7108			goto init_hw_err;
7109		break;
7110
7111	default:
7112		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7113		break;
7114	}
7115
	/* seed the driver-pulse sequence from the bootcode's mailbox */
7116	if (!BP_NOMCP(bp)) {
7117		int func = BP_FUNC(bp);
7118
7119		bp->fw_drv_pulse_wr_seq =
7120		   (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
7121		    DRV_PULSE_SEQ_MASK);
Eilon Greenstein6fe49bb2009-08-12 08:23:17 +00007122		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7123	}
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007124
7125	/* this needs to be done before gunzip end */
7126	bnx2x_zero_def_sb(bp);
7127	for_each_queue(bp, i)
7128		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
Michael Chan37b091b2009-10-10 13:46:55 +00007129#ifdef BCM_CNIC
7130	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7131#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007132
7133init_hw_err:
7134	bnx2x_gunzip_end(bp);
7135
7136	return rc;
7137}
7138
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007139static void bnx2x_free_mem(struct bnx2x *bp)
7140{
7141
7142#define BNX2X_PCI_FREE(x, y, size) \
7143 do { \
7144 if (x) { \
FUJITA Tomonori1a983142010-04-04 01:51:03 +00007145 dma_free_coherent(&bp->pdev->dev, size, x, y); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007146 x = NULL; \
7147 y = 0; \
7148 } \
7149 } while (0)
7150
7151#define BNX2X_FREE(x) \
7152 do { \
7153 if (x) { \
7154 vfree(x); \
7155 x = NULL; \
7156 } \
7157 } while (0)
7158
7159 int i;
7160
7161 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007162 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007163 for_each_queue(bp, i) {
7164
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007165 /* status blocks */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007166 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7167 bnx2x_fp(bp, i, status_blk_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07007168 sizeof(struct host_status_block));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007169 }
7170 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007171 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007172
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007173 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007174 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7175 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7176 bnx2x_fp(bp, i, rx_desc_mapping),
7177 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7178
7179 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7180 bnx2x_fp(bp, i, rx_comp_mapping),
7181 sizeof(struct eth_fast_path_rx_cqe) *
7182 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007183
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07007184 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07007185 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07007186 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7187 bnx2x_fp(bp, i, rx_sge_mapping),
7188 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7189 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007190 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007191 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007192
7193 /* fastpath tx rings: tx_buf tx_desc */
7194 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7195 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7196 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07007197 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007198 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007199 /* end of fastpath */
7200
7201 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007202 sizeof(struct host_def_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007203
7204 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007205 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007206
Michael Chan37b091b2009-10-10 13:46:55 +00007207#ifdef BCM_CNIC
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007208 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7209 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7210 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7211 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
Michael Chan37b091b2009-10-10 13:46:55 +00007212 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7213 sizeof(struct host_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007214#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07007215 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007216
7217#undef BNX2X_PCI_FREE
7218#undef BNX2X_KFREE
7219}
7220
/*
 * Allocate all host memory the driver needs: per-queue status blocks,
 * Rx/Tx rings (DMA-coherent descriptor rings plus vmalloc'ed shadow
 * rings of driver bookkeeping entries), the default status block, the
 * slowpath buffer, optional CNIC tables and the slowpath (SP) queue.
 *
 * Returns 0 on success or -ENOMEM; on any failure everything allocated
 * so far is released via bnx2x_free_mem() before returning.
 */
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

/* DMA-coherent, zeroed allocation; jumps to the cleanup label on failure */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* Virtually-contiguous (non-DMA), zeroed allocation, same error policy */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	/* Each 64-byte T2 entry's last 8 bytes link to the bus address of
	 * the next entry, forming a chain the hardware can walk. */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
7326
7327static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7328{
7329 int i;
7330
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007331 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007332 struct bnx2x_fastpath *fp = &bp->fp[i];
7333
7334 u16 bd_cons = fp->tx_bd_cons;
7335 u16 sw_prod = fp->tx_pkt_prod;
7336 u16 sw_cons = fp->tx_pkt_cons;
7337
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007338 while (sw_cons != sw_prod) {
7339 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7340 sw_cons++;
7341 }
7342 }
7343}
7344
7345static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7346{
7347 int i, j;
7348
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007349 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007350 struct bnx2x_fastpath *fp = &bp->fp[j];
7351
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007352 for (i = 0; i < NUM_RX_BD; i++) {
7353 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7354 struct sk_buff *skb = rx_buf->skb;
7355
7356 if (skb == NULL)
7357 continue;
7358
FUJITA Tomonori1a983142010-04-04 01:51:03 +00007359 dma_unmap_single(&bp->pdev->dev,
7360 dma_unmap_addr(rx_buf, mapping),
7361 bp->rx_buf_size, DMA_FROM_DEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007362
7363 rx_buf->skb = NULL;
7364 dev_kfree_skb(skb);
7365 }
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07007366 if (!fp->disable_tpa)
Eilon Greenstein32626232008-08-13 15:51:07 -07007367 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7368 ETH_MAX_AGGREGATION_QUEUES_E1 :
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07007369 ETH_MAX_AGGREGATION_QUEUES_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007370 }
7371}
7372
/* Release every sk_buff still held by the driver: Tx first, then Rx. */
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
7378
7379static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7380{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007381 int i, offset = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007382
7383 free_irq(bp->msix_table[0].vector, bp->dev);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007384 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007385 bp->msix_table[0].vector);
7386
Michael Chan37b091b2009-10-10 13:46:55 +00007387#ifdef BCM_CNIC
7388 offset++;
7389#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007390 for_each_queue(bp, i) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007391 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007392 "state %x\n", i, bp->msix_table[i + offset].vector,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007393 bnx2x_fp(bp, i, state));
7394
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007395 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007396 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007397}
7398
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007399static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007400{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007401 if (bp->flags & USING_MSIX_FLAG) {
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007402 if (!disable_only)
7403 bnx2x_free_msix_irqs(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007404 pci_disable_msix(bp->pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007405 bp->flags &= ~USING_MSIX_FLAG;
7406
Eilon Greenstein8badd272009-02-12 08:36:15 +00007407 } else if (bp->flags & USING_MSI_FLAG) {
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007408 if (!disable_only)
7409 free_irq(bp->pdev->irq, bp->dev);
Eilon Greenstein8badd272009-02-12 08:36:15 +00007410 pci_disable_msi(bp->pdev);
7411 bp->flags &= ~USING_MSI_FLAG;
7412
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007413 } else if (!disable_only)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007414 free_irq(bp->pdev->irq, bp->dev);
7415}
7416
/*
 * Build the MSI-X table (entry 0 = slowpath, optional CNIC entry, then
 * one entry per fastpath queue, each mapped to an IGU vector) and enable
 * MSI-X.  If the PCI layer grants fewer vectors than requested but at
 * least BNX2X_MIN_MSIX_VEC_CNT, shrink bp->num_queues to fit and retry
 * with the granted count.
 *
 * Returns 0 and sets USING_MSIX_FLAG on success; otherwise returns the
 * pci_enable_msix() error so the caller can fall back to MSI/INTx.
 */
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	/* slowpath entry */
	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	/* CNIC gets entry 1; fastpath entries are pushed one slot back */
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	/* rc > 0 means "only this many vectors available" */
	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* vectors available for FP */
		int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;

		DP(NETIF_MSG_IFUP,
		   "Trying to use less MSI-X vectors: %d\n", rc);

		/* retry with exactly the number of vectors granted */
		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable rc %d\n", rc);
			return rc;
		}

		bp->num_queues = min(bp->num_queues, fp_vec);

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
		   bp->num_queues);
	} else if (rc) {
		/* hard failure (or too few vectors to be useful) */
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
7473
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007474static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7475{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007476 int i, rc, offset = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007477
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007478 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7479 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007480 if (rc) {
7481 BNX2X_ERR("request sp irq failed\n");
7482 return -EBUSY;
7483 }
7484
Michael Chan37b091b2009-10-10 13:46:55 +00007485#ifdef BCM_CNIC
7486 offset++;
7487#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007488 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007489 struct bnx2x_fastpath *fp = &bp->fp[i];
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007490 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7491 bp->dev->name, i);
Eilon Greensteinca003922009-08-12 22:53:28 -07007492
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007493 rc = request_irq(bp->msix_table[i + offset].vector,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007494 bnx2x_msix_fp_int, 0, fp->name, fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007495 if (rc) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007496 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007497 bnx2x_free_msix_irqs(bp);
7498 return -EBUSY;
7499 }
7500
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007501 fp->state = BNX2X_FP_STATE_IRQ;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007502 }
7503
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007504 i = BNX2X_NUM_QUEUES(bp);
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007505 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7506 " ... fp[%d] %d\n",
7507 bp->msix_table[0].vector,
7508 0, bp->msix_table[offset].vector,
7509 i - 1, bp->msix_table[offset + i - 1].vector);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007510
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007511 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007512}
7513
Eilon Greenstein8badd272009-02-12 08:36:15 +00007514static int bnx2x_enable_msi(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007515{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007516 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007517
Eilon Greenstein8badd272009-02-12 08:36:15 +00007518 rc = pci_enable_msi(bp->pdev);
7519 if (rc) {
7520 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7521 return -1;
7522 }
7523 bp->flags |= USING_MSI_FLAG;
7524
7525 return 0;
7526}
7527
7528static int bnx2x_req_irq(struct bnx2x *bp)
7529{
7530 unsigned long flags;
7531 int rc;
7532
7533 if (bp->flags & USING_MSI_FLAG)
7534 flags = 0;
7535 else
7536 flags = IRQF_SHARED;
7537
7538 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007539 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007540 if (!rc)
7541 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7542
7543 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007544}
7545
/* Enable NAPI polling on every fastpath queue. */
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}
7553
/* Disable NAPI polling on every fastpath queue (waits for in-progress
 * polls per napi_disable() semantics). */
static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}
7561
7562static void bnx2x_netif_start(struct bnx2x *bp)
7563{
Eilon Greensteine1510702009-07-21 05:47:41 +00007564 int intr_sem;
7565
7566 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7567 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7568
7569 if (intr_sem) {
Yitchak Gertner65abd742008-08-25 15:26:24 -07007570 if (netif_running(bp->dev)) {
Yitchak Gertner65abd742008-08-25 15:26:24 -07007571 bnx2x_napi_enable(bp);
7572 bnx2x_int_enable(bp);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007573 if (bp->state == BNX2X_STATE_OPEN)
7574 netif_tx_wake_all_queues(bp->dev);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007575 }
7576 }
7577}
7578
/*
 * Quiesce the interface: silence and synchronize device interrupts
 * (@disable_hw controls whether the HW masking is touched), then stop
 * NAPI polling, then freeze the Tx queues.  Order matters: interrupts
 * must be off before NAPI is disabled.
 */
static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}
7585
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007586/*
7587 * Init service functions
7588 */
7589
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * Fills the slowpath mac_configuration_cmd buffer (primary MAC entry,
 * plus an all-ones broadcast entry when @with_bcast) and posts a
 * SET_MAC ramrod.  Does not wait for completion - callers wait on
 * bp->set_mac_pending themselves.
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	/* CAM entries hold the MAC as three byte-swapped 16-bit words */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		/* clearing: mark the entry invalid instead of programming it */
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
							cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
7661
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * Fills a single-entry E1H mac_configuration_cmd (including the
 * function's outer VLAN, bp->e1hov) and posts a SET_MAC ramrod.
 * Does not wait for completion - callers wait on bp->set_mac_pending.
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	/* MAC stored as three byte-swapped 16-bit words */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		/* clearing: flag the entry for invalidation */
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
7709
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007710static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7711 int *state_p, int poll)
7712{
7713 /* can take a while if any port is running */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00007714 int cnt = 5000;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007715
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007716 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7717 poll ? "polling" : "waiting", state, idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007718
7719 might_sleep();
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007720 while (cnt--) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007721 if (poll) {
7722 bnx2x_rx_int(bp->fp, 10);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007723 /* if index is different from 0
7724 * the reply for some commands will
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007725 * be on the non default queue
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007726 */
7727 if (idx)
7728 bnx2x_rx_int(&bp->fp[idx], 10);
7729 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007730
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007731 mb(); /* state is changed by bnx2x_sp_event() */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00007732 if (*state_p == state) {
7733#ifdef BNX2X_STOP_ON_ERROR
7734 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7735#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007736 return 0;
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00007737 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007738
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007739 msleep(1);
Eilon Greensteine3553b22009-08-12 08:23:31 +00007740
7741 if (bp->panic)
7742 return -EIO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007743 }
7744
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007745 /* timeout! */
Eliezer Tamir49d66772008-02-28 11:53:13 -08007746 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7747 poll ? "polling" : "waiting", state, idx);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007748#ifdef BNX2X_STOP_ON_ERROR
7749 bnx2x_panic();
7750#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007751
Eliezer Tamir49d66772008-02-28 11:53:13 -08007752 return -EBUSY;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007753}
7754
/*
 * Program (set=1) or clear (set=0) the netdev's MAC in the E1H CAM at
 * the function's offset and wait for the ramrod.  set_mac_pending is
 * bumped (with a write barrier) before the ramrod is posted; the wait
 * polls it back to the expected value.
 */
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}
7766
/*
 * E1 counterpart of bnx2x_set_eth_mac_addr_e1h(): program or clear the
 * netdev's MAC (plus the broadcast entry - final argument 1) at the
 * per-port unicast CAM offset (0 for port 0, 32 for port 1), then wait
 * for the ramrod completion.
 */
static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}
7779
#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 *
 * NOTE(review): as written this always returns 0 - the -ENODEV case
 * described above is not implemented (bnx2x_wait_ramrod's result is
 * discarded); confirm intended contract.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
				  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
				  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
				   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif
7817
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007818static int bnx2x_setup_leading(struct bnx2x *bp)
7819{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007820 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007821
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007822 /* reset IGU state */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007823 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007824
7825 /* SETUP ramrod */
7826 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7827
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007828 /* Wait for completion */
7829 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007830
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007831 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007832}
7833
7834static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7835{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007836 struct bnx2x_fastpath *fp = &bp->fp[index];
7837
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007838 /* reset IGU state */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007839 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007840
Eliezer Tamir228241e2008-02-28 11:56:57 -08007841 /* SETUP ramrod */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007842 fp->state = BNX2X_FP_STATE_OPENING;
7843 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7844 fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007845
7846 /* Wait for completion */
7847 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007848 &(fp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007849}
7850
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007851static int bnx2x_poll(struct napi_struct *napi, int budget);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007852
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007853static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007854{
Eilon Greensteinca003922009-08-12 22:53:28 -07007855
7856 switch (bp->multi_mode) {
7857 case ETH_RSS_MODE_DISABLED:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007858 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007859 break;
7860
7861 case ETH_RSS_MODE_REGULAR:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007862 if (num_queues)
7863 bp->num_queues = min_t(u32, num_queues,
7864 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07007865 else
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007866 bp->num_queues = min_t(u32, num_online_cpus(),
7867 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07007868 break;
7869
7870
7871 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007872 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007873 break;
7874 }
Eilon Greensteinca003922009-08-12 22:53:28 -07007875}
7876
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007877static int bnx2x_set_num_queues(struct bnx2x *bp)
Eilon Greensteinca003922009-08-12 22:53:28 -07007878{
7879 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007880
Dmitry Kravkov5d7cd492010-07-27 12:32:19 +00007881 switch (bp->int_mode) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00007882 case INT_MODE_INTx:
7883 case INT_MODE_MSI:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007884 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007885 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
Eilon Greenstein8badd272009-02-12 08:36:15 +00007886 break;
Eilon Greenstein8badd272009-02-12 08:36:15 +00007887 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007888 /* Set number of queues according to bp->multi_mode value */
7889 bnx2x_set_num_queues_msix(bp);
Eilon Greensteinca003922009-08-12 22:53:28 -07007890
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007891 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7892 bp->num_queues);
Eilon Greensteinca003922009-08-12 22:53:28 -07007893
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007894 /* if we can't use MSI-X we only need one fp,
7895 * so try to enable MSI-X with the requested number of fp's
7896 * and fallback to MSI or legacy INTx with one fp
7897 */
Eilon Greensteinca003922009-08-12 22:53:28 -07007898 rc = bnx2x_enable_msix(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007899 if (rc)
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007900 /* failed to enable MSI-X */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007901 bp->num_queues = 1;
Eilon Greenstein8badd272009-02-12 08:36:15 +00007902 break;
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007903 }
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007904 bp->dev->real_num_tx_queues = bp->num_queues;
Eilon Greensteinca003922009-08-12 22:53:28 -07007905 return rc;
Eilon Greenstein8badd272009-02-12 08:36:15 +00007906}
7907
Michael Chan993ac7b2009-10-10 13:46:56 +00007908#ifdef BCM_CNIC
7909static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7910static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7911#endif
Eilon Greenstein8badd272009-02-12 08:36:15 +00007912
/*
 * bnx2x_nic_load - bring the device up into an operational state.
 *
 * @bp:		driver instance
 * @load_mode:	LOAD_NORMAL / LOAD_OPEN / LOAD_DIAG - selects how the Tx
 *		queues and the Rx filter are started at the end
 *
 * Sequence: choose the queue count and interrupt mode, allocate memory,
 * request IRQs, negotiate the LOAD type with the MCP firmware (or emulate
 * it via load_count[] when no MCP is present), initialize the HW and
 * internal structures, bring up the leading and non-default connections,
 * program the MAC address(es), then start the fast path and the periodic
 * timer.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released through the load_error* labels, in reverse
 * order of acquisition.
 *
 * must be called with rtnl_lock
 */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	/* debug builds refuse to load after a panic was latched */
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* picks num_queues and tries MSI-X; rc is examined again below
	 * only in the non-MSI-X branch */
	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp)) {
		bnx2x_free_irq(bp, true);
		return -ENOMEM;
	}

	/* per-queue TPA flag mirrors the global TPA_ENABLE_FLAG */
	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_num_queues()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		/* no management FW: emulate its bookkeeping with the
		 * driver-global load_count[] (unwound in
		 * bnx2x_chip_cleanup()) */
		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	/* the function that loaded COMMON or PORT becomes the PMF; the
	 * PMF does the initial PHY init below (link handling) */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		/* close the load/unload handshake with the MCP before
		 * unwinding */
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* advertise DCC support bits via shmem2, when present */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	/* bring up the leading (default) connection; the OPEN-state
	 * checks below presumably depend on its outcome - note that the
	 * state transition happens inside bnx2x_setup_leading() */
	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
			bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
				      CNIC_SB_ID(bp));
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	/* account this successful load (used by the recovery flow) */
	bnx2x_inc_load_cnt(bp);

	return 0;

	/* error unwinding: each label releases what was acquired after
	 * the corresponding point of the success path */
#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp, false);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
8163
8164static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8165{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00008166 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008167 int rc;
8168
Eliezer Tamirc14423f2008-02-28 11:49:42 -08008169 /* halt the connection */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00008170 fp->state = BNX2X_FP_STATE_HALTING;
8171 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008172
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008173 /* Wait for completion */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008174 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00008175 &(fp->state), 1);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08008176 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008177 return rc;
8178
8179 /* delete cfc entry */
8180 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8181
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008182 /* Wait for completion */
8183 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00008184 &(fp->state), 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008185 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008186}
8187
/*
 * bnx2x_stop_leading - tear down the leading (default) connection.
 *
 * Halts client 0, then issues a PORT_DELETE ramrod and polls the default
 * status block's slow-path producer for its completion.  On exit,
 * bp->state is CLOSING_WAIT4_UNLOAD and fp[0] is marked CLOSED even if
 * the wait timed out (the chip is about to be reset anyway).
 *
 * Returns 0 on success, -EBUSY (or a bnx2x_wait_ramrod() error) on
 * timeout.
 */
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	/* snapshot the producer so we can detect the next completion */
	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
8237
/*
 * bnx2x_reset_func - reset the per-function portion of the chip.
 *
 * Zeroes the function's IGU leading/trailing edge registers, (with CNIC
 * built in) stops the TM timer linear scan and polls up to ~2s for it to
 * stop, and finally clears this function's ILT entries.
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		/* scan-on register reads 0 once the scan has stopped */
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
8266
/*
 * bnx2x_reset_port - reset the per-port portion of the chip.
 *
 * Masks the port's NIG interrupts, blocks driver and non-MCP traffic
 * from reaching the BRB, masks the port's AEU attentions, then - after a
 * 100ms grace period - logs a debug warning if BRB blocks are still
 * occupied.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	/* mask this port's NIG interrupts */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* give in-flight packets time to drain before checking the BRB */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
8292
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008293static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8294{
8295 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
8296 BP_FUNC(bp), reset_code);
8297
8298 switch (reset_code) {
8299 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8300 bnx2x_reset_port(bp);
8301 bnx2x_reset_func(bp);
8302 bnx2x_reset_common(bp);
8303 break;
8304
8305 case FW_MSG_CODE_DRV_UNLOAD_PORT:
8306 bnx2x_reset_port(bp);
8307 bnx2x_reset_func(bp);
8308 break;
8309
8310 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8311 bnx2x_reset_func(bp);
8312 break;
8313
8314 default:
8315 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8316 break;
8317 }
8318}
8319
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00008320static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008321{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008322 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008323 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008324 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008325
Eilon Greenstein555f6c72009-02-12 08:36:11 +00008326 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00008327 for_each_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08008328 struct bnx2x_fastpath *fp = &bp->fp[i];
8329
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008330 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08008331 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008332
Eilon Greenstein7961f792009-03-02 07:59:31 +00008333 bnx2x_tx_int(fp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008334 if (!cnt) {
8335 BNX2X_ERR("timeout waiting for queue[%d]\n",
8336 i);
8337#ifdef BNX2X_STOP_ON_ERROR
8338 bnx2x_panic();
8339 return -EBUSY;
8340#else
8341 break;
8342#endif
8343 }
8344 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008345 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008346 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08008347 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008348 /* Give HW time to discard old tx messages */
8349 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008350
Yitchak Gertner65abd742008-08-25 15:26:24 -07008351 if (CHIP_IS_E1(bp)) {
8352 struct mac_configuration_cmd *config =
8353 bnx2x_sp(bp, mcast_config);
8354
Michael Chane665bfd2009-10-10 13:46:54 +00008355 bnx2x_set_eth_mac_addr_e1(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07008356
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08008357 for (i = 0; i < config->hdr.length; i++)
Yitchak Gertner65abd742008-08-25 15:26:24 -07008358 CAM_INVALIDATE(config->config_table[i]);
8359
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08008360 config->hdr.length = i;
Yitchak Gertner65abd742008-08-25 15:26:24 -07008361 if (CHIP_REV_IS_SLOW(bp))
8362 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8363 else
8364 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
Eilon Greenstein0626b892009-02-12 08:38:14 +00008365 config->hdr.client_id = bp->fp->cl_id;
Yitchak Gertner65abd742008-08-25 15:26:24 -07008366 config->hdr.reserved1 = 0;
8367
Michael Chane665bfd2009-10-10 13:46:54 +00008368 bp->set_mac_pending++;
8369 smp_wmb();
8370
Yitchak Gertner65abd742008-08-25 15:26:24 -07008371 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8372 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8373 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8374
8375 } else { /* E1H */
8376 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8377
Michael Chane665bfd2009-10-10 13:46:54 +00008378 bnx2x_set_eth_mac_addr_e1h(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07008379
8380 for (i = 0; i < MC_HASH_SIZE; i++)
8381 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00008382
8383 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07008384 }
Michael Chan993ac7b2009-10-10 13:46:56 +00008385#ifdef BCM_CNIC
8386 /* Clear iSCSI L2 MAC */
8387 mutex_lock(&bp->cnic_mutex);
8388 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8389 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8390 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8391 }
8392 mutex_unlock(&bp->cnic_mutex);
8393#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07008394
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008395 if (unload_mode == UNLOAD_NORMAL)
8396 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08008397
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00008398 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008399 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008400
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00008401 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008402 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008403 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008404 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008405 /* The mac address is written to entries 1-4 to
8406 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008407 u8 entry = (BP_E1HVN(bp) + 1)*8;
8408
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008409 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07008410 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008411
8412 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8413 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07008414 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008415
8416 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08008417
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008418 } else
8419 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8420
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008421 /* Close multi and leading connections
8422 Completions for ramrods are collected in a synchronous way */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008423 for_each_nondefault_queue(bp, i)
8424 if (bnx2x_stop_multi(bp, i))
Eliezer Tamir228241e2008-02-28 11:56:57 -08008425 goto unload_error;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008426
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008427 rc = bnx2x_stop_leading(bp);
8428 if (rc) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008429 BNX2X_ERR("Stop leading failed!\n");
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008430#ifdef BNX2X_STOP_ON_ERROR
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008431 return -EBUSY;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008432#else
8433 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008434#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08008435 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008436
Eliezer Tamir228241e2008-02-28 11:56:57 -08008437unload_error:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008438 if (!BP_NOMCP(bp))
Eliezer Tamir228241e2008-02-28 11:56:57 -08008439 reset_code = bnx2x_fw_command(bp, reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008440 else {
Eilon Greensteinf5372252009-02-12 08:38:30 +00008441 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008442 load_count[0], load_count[1], load_count[2]);
8443 load_count[0]--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008444 load_count[1 + port]--;
Eilon Greensteinf5372252009-02-12 08:38:30 +00008445 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008446 load_count[0], load_count[1], load_count[2]);
8447 if (load_count[0] == 0)
8448 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008449 else if (load_count[1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008450 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8451 else
8452 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8453 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008454
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008455 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8456 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8457 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008458
8459 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08008460 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008461
8462 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008463 if (!BP_NOMCP(bp))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008464 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
Eilon Greenstein356e2382009-02-12 08:38:32 +00008465
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00008466}
8467
8468static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8469{
8470 u32 val;
8471
8472 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8473
8474 if (CHIP_IS_E1(bp)) {
8475 int port = BP_PORT(bp);
8476 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8477 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8478
8479 val = REG_RD(bp, addr);
8480 val &= ~(0x300);
8481 REG_WR(bp, addr, val);
8482 } else if (CHIP_IS_E1H(bp)) {
8483 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8484 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8485 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8486 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8487 }
8488}
8489
/*
 * bnx2x_nic_unload - bring the device down.
 *
 * @bp:		 driver instance
 * @unload_mode: UNLOAD_NORMAL / WoL modes / UNLOAD_RECOVERY;
 *		 UNLOAD_RECOVERY skips the chip cleanup and resets the
 *		 MCP mailbox sequence instead
 *
 * Stops the data path (rx filter to "drop all", HW interrupts, NAPI, Tx,
 * timer), releases IRQs, optionally cleans up the chip, frees all driver
 * memory and updates the recovery bookkeeping.  Returns 0, or -EINVAL if
 * the interface was already CLOSED.
 *
 * must be called with rtnl_lock
 */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;

	if (bp->state == BNX2X_STATE_CLOSED) {
		/* Interface has been removed - nothing to recover */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
		smp_wmb();

		return -EINVAL;
	}

#ifdef BCM_CNIC
	/* tell the CNIC client we are going down */
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	/* keep the driver pulse alive for the MCP while unloading */
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);

	/* Reset MCP mail box sequence if there is on going recovery */
	if (unload_mode == UNLOAD_RECOVERY)
		bp->fw_seq = 0;

	return 0;
}
8555
/* Close or open gates #2, #3 and #4 that isolate the host from the chip
 * during a "process kill" reset.
 *
 * @bp:    driver handle
 * @close: true to close the gates (block host accesses), false to reopen
 */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4: discard doorbell writes while closed */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2: discard internal writes while closed */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3: per-port HC config */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	/* NOTE(review): polarity here is inverted relative to gates #2/#4
	 * above - bit 0 is SET when opening. Presumably HC config bit 0 is
	 * an "enable" rather than a "discard" bit - confirm against the HC
	 * register documentation.
	 */
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	/* flush posted writes before proceeding with the reset sequence */
	mmiowb();
}
8582
8583#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8584
8585static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8586{
8587 /* Do some magic... */
8588 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8589 *magic_val = val & SHARED_MF_CLP_MAGIC;
8590 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8591}
8592
8593/* Restore the value of the `magic' bit.
8594 *
8595 * @param pdev Device handle.
8596 * @param magic_val Old value of the `magic' bit.
8597 */
8598static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8599{
8600 /* Restore the `magic' bit value... */
8601 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
8602 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
8603 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
8604 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8605 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8606 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8607}
8608
/* Prepares for MCP reset: takes care of CLP configurations.
 *
 * @bp:        driver handle
 * @magic_val: out - old value of the `magic' bit, to be restored later
 *             via bnx2x_clp_reset_done()
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config across the reset
	 * (E1 has no MF configuration to preserve) */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags so shmem reads as invalid until the
	 * MCP comes back up and re-signs it */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}
8633
8634#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8635#define MCP_ONE_TIMEOUT 100 /* 100 ms */
8636
8637/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8638 * depending on the HW type.
8639 *
8640 * @param bp
8641 */
8642static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8643{
8644 /* special handling for emulation and FPGA,
8645 wait 10 times longer */
8646 if (CHIP_REV_IS_SLOW(bp))
8647 msleep(MCP_ONE_TIMEOUT*10);
8648 else
8649 msleep(MCP_ONE_TIMEOUT);
8650}
8651
/* Complete an MCP reset: poll shmem until the MCP signs the validity map
 * again (i.e. the MCP is back up), then restore the `magic' bit.
 *
 * @bp:        driver handle
 * @magic_val: old `magic' bit value saved by bnx2x_reset_mcp_prep()
 *
 * Returns 0 when the MCP came up, -ENOTTY if shmem is unavailable or the
 * MCP failed to come up within MCP_TIMEOUT.
 */
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	/* give the MCP a head start before the first poll */
	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: its best to check validity map of last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value - mirrors the conditional set in
	 * bnx2x_reset_mcp_prep() (E1 never saved it) */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
8703
/* Prepare the PXP block for chip reset by clearing its init/done
 * registers (E1 has nothing to do here).
 *
 * @bp: driver handle
 */
static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
		/* flush posted writes before the reset proceeds */
		mmiowb();
	}
}
8713
/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *              one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	/* blocks in reset register 1 that must stay out of reset */
	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	/* blocks in reset register 2 that must stay out of reset */
	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	/* reset register 2 is narrower on E1 */
	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	/* assert reset on everything except the excluded blocks
	 * (writing to _CLEAR puts blocks INTO reset) */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	/* release all blocks from reset (writing to _SET takes blocks
	 * OUT of reset) */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}
8762
/* Perform the "process kill" recovery: drain outstanding PCI traffic,
 * isolate the host behind gates #2-#4, reset the chip and the MCP, then
 * reopen the gates.
 *
 * @bp: driver handle
 *
 * Returns 0 on success, -EAGAIN if the Tetris buffer did not drain or
 * the MCP failed to come back up.
 */
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;


	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		/* idle criteria: expected free counts, both ports idle,
		 * no pending expansion-ROM reads */
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
		   " are still"
		   " outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
		   " port_is_idle_0=0x%08x,"
		   " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
		   sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
		   pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare to chip reset: */
	/* MCP: set `magic' bit, invalidate shmem (val gets the saved bit) */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP: wait for it to come up; restores the `magic' bit */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}
8844
8845static int bnx2x_leader_reset(struct bnx2x *bp)
8846{
8847 int rc = 0;
8848 /* Try to recover after the failure */
8849 if (bnx2x_process_kill(bp)) {
8850 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
8851 bp->dev->name);
8852 rc = -EAGAIN;
8853 goto exit_leader_reset;
8854 }
8855
8856 /* Clear "reset is in progress" bit and update the driver state */
8857 bnx2x_set_reset_done(bp);
8858 bp->recovery_state = BNX2X_RECOVERY_DONE;
8859
8860exit_leader_reset:
8861 bp->is_leader = 0;
8862 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8863 smp_wmb();
8864 return rc;
8865}
8866
8867static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8868
/* Parity-recovery state machine.
 *
 * States: INIT (unload the NIC, try to become leader) -> WAIT (leader:
 * wait for all functions to unload, then process-kill; non-leader: wait
 * for the leader to finish, or take over leadership) -> DONE.
 * Re-arms itself via bp->reset_task while waiting.
 *
 * Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensure that it
 * will never be called when netif_running(bp->dev) is false.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 * update values are seen on other CPUs
			 */
			smp_wmb();
			/* fall back to the top of the loop and handle
			 * the WAIT state immediately */
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
							      HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR"%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all function
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								      PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
							      HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}
8972
/* Delayed-work handler that resets the NIC, either as a plain
 * unload/reload or - when a recovery is pending - by driving the parity
 * recovery state machine.
 *
 * bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
 * scheduled on a general queue in order to prevent a dead lock.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);

#ifdef BNX2X_STOP_ON_ERROR
	/* debug builds deliberately skip the reset so the failure state
	 * can be dumped */
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	/* recovery_state != DONE means a parity event is being handled */
	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
		bnx2x_parity_recover(bp);
	else {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
	}

reset_task_exit:
	rtnl_unlock();
}
9002
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009003/* end of nic load/unload */
9004
9005/* ethtool_ops */
9006
9007/*
9008 * Init service functions
9009 */
9010
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00009011static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
9012{
9013 switch (func) {
9014 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
9015 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
9016 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
9017 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
9018 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
9019 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
9020 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
9021 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
9022 default:
9023 BNX2X_ERR("Unsupported function index: %d\n", func);
9024 return (u32)(-1);
9025 }
9026}
9027
/* Disable interrupts on an E1H chip by temporarily pretending to be
 * function 0 (the "like-E1" mode), then restoring the original function.
 *
 * @bp:        driver handle
 * @orig_func: function index to restore in the pretend register
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
9060
/* Disable chip interrupts; on E1H this requires the pretend-function
 * dance handled by bnx2x_undi_int_disable_e1h().
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp))
		bnx2x_int_disable(bp);
	else
		bnx2x_undi_int_disable_e1h(bp, func);
}
9068
/* Detect a previously loaded UNDI (boot) driver and, if present, unload
 * it cleanly: notify the firmware for both ports, quiesce traffic, reset
 * the device and restore our function/firmware-sequence state.
 *
 * @bp: driver handle
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			/* mask interrupts (uses the func saved above) */
			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			       (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					      MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
9167
/* Probe the chip-common hardware info at probe time: chip id/revision,
 * flash size, shmem base addresses, LED mode, feature flags, bootcode
 * version, WoL capability and part number. Sets NO_MCP_FLAG and returns
 * early when the MCP is not active.
 *
 * @bp: driver handle
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* NOTE(review): 0x2874 is an undocumented magic register here;
	 * presumably a port-configuration strap - confirm against HW docs */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	/* shmem outside the expected window means the MCP is not running */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERROR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	/* bootcode version lives in the upper bytes of bc_rev */
	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
			    "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	/* part number is stored as four consecutive 32-bit words */
	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
9266
9267static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9268 u32 switch_cfg)
9269{
9270 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009271 u32 ext_phy_type;
9272
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009273 switch (switch_cfg) {
9274 case SWITCH_CFG_1G:
9275 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9276
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009277 ext_phy_type =
9278 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009279 switch (ext_phy_type) {
9280 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9281 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9282 ext_phy_type);
9283
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009284 bp->port.supported |= (SUPPORTED_10baseT_Half |
9285 SUPPORTED_10baseT_Full |
9286 SUPPORTED_100baseT_Half |
9287 SUPPORTED_100baseT_Full |
9288 SUPPORTED_1000baseT_Full |
9289 SUPPORTED_2500baseX_Full |
9290 SUPPORTED_TP |
9291 SUPPORTED_FIBRE |
9292 SUPPORTED_Autoneg |
9293 SUPPORTED_Pause |
9294 SUPPORTED_Asym_Pause);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009295 break;
9296
9297 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9298 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9299 ext_phy_type);
9300
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009301 bp->port.supported |= (SUPPORTED_10baseT_Half |
9302 SUPPORTED_10baseT_Full |
9303 SUPPORTED_100baseT_Half |
9304 SUPPORTED_100baseT_Full |
9305 SUPPORTED_1000baseT_Full |
9306 SUPPORTED_TP |
9307 SUPPORTED_FIBRE |
9308 SUPPORTED_Autoneg |
9309 SUPPORTED_Pause |
9310 SUPPORTED_Asym_Pause);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009311 break;
9312
9313 default:
9314 BNX2X_ERR("NVRAM config error. "
9315 "BAD SerDes ext_phy_config 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009316 bp->link_params.ext_phy_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009317 return;
9318 }
9319
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009320 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9321 port*0x10);
9322 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009323 break;
9324
9325 case SWITCH_CFG_10G:
9326 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9327
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009328 ext_phy_type =
9329 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009330 switch (ext_phy_type) {
9331 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9332 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9333 ext_phy_type);
9334
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009335 bp->port.supported |= (SUPPORTED_10baseT_Half |
9336 SUPPORTED_10baseT_Full |
9337 SUPPORTED_100baseT_Half |
9338 SUPPORTED_100baseT_Full |
9339 SUPPORTED_1000baseT_Full |
9340 SUPPORTED_2500baseX_Full |
9341 SUPPORTED_10000baseT_Full |
9342 SUPPORTED_TP |
9343 SUPPORTED_FIBRE |
9344 SUPPORTED_Autoneg |
9345 SUPPORTED_Pause |
9346 SUPPORTED_Asym_Pause);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009347 break;
9348
Eliezer Tamirf1410642008-02-28 11:51:50 -08009349 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9350 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
9351 ext_phy_type);
9352
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009353 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9354 SUPPORTED_1000baseT_Full |
9355 SUPPORTED_FIBRE |
9356 SUPPORTED_Autoneg |
9357 SUPPORTED_Pause |
9358 SUPPORTED_Asym_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08009359 break;
9360
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009361 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9362 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
9363 ext_phy_type);
9364
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009365 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9366 SUPPORTED_2500baseX_Full |
9367 SUPPORTED_1000baseT_Full |
9368 SUPPORTED_FIBRE |
9369 SUPPORTED_Autoneg |
9370 SUPPORTED_Pause |
9371 SUPPORTED_Asym_Pause);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009372 break;
9373
Eilon Greenstein589abe32009-02-12 08:36:55 +00009374 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9375 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9376 ext_phy_type);
9377
9378 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9379 SUPPORTED_FIBRE |
9380 SUPPORTED_Pause |
9381 SUPPORTED_Asym_Pause);
9382 break;
9383
9384 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9385 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
9386 ext_phy_type);
9387
9388 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9389 SUPPORTED_1000baseT_Full |
9390 SUPPORTED_FIBRE |
9391 SUPPORTED_Pause |
9392 SUPPORTED_Asym_Pause);
9393 break;
9394
9395 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9396 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
9397 ext_phy_type);
9398
9399 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9400 SUPPORTED_1000baseT_Full |
9401 SUPPORTED_Autoneg |
9402 SUPPORTED_FIBRE |
9403 SUPPORTED_Pause |
9404 SUPPORTED_Asym_Pause);
9405 break;
9406
Eilon Greenstein4d295db2009-07-21 05:47:47 +00009407 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9408 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9409 ext_phy_type);
9410
9411 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9412 SUPPORTED_1000baseT_Full |
9413 SUPPORTED_Autoneg |
9414 SUPPORTED_FIBRE |
9415 SUPPORTED_Pause |
9416 SUPPORTED_Asym_Pause);
9417 break;
9418
Eliezer Tamirf1410642008-02-28 11:51:50 -08009419 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9420 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9421 ext_phy_type);
9422
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009423 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9424 SUPPORTED_TP |
9425 SUPPORTED_Autoneg |
9426 SUPPORTED_Pause |
9427 SUPPORTED_Asym_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08009428 break;
9429
Eilon Greenstein28577182009-02-12 08:37:00 +00009430 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9431 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9432 ext_phy_type);
9433
9434 bp->port.supported |= (SUPPORTED_10baseT_Half |
9435 SUPPORTED_10baseT_Full |
9436 SUPPORTED_100baseT_Half |
9437 SUPPORTED_100baseT_Full |
9438 SUPPORTED_1000baseT_Full |
9439 SUPPORTED_10000baseT_Full |
9440 SUPPORTED_TP |
9441 SUPPORTED_Autoneg |
9442 SUPPORTED_Pause |
9443 SUPPORTED_Asym_Pause);
9444 break;
9445
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009446 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9447 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9448 bp->link_params.ext_phy_config);
9449 break;
9450
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009451 default:
9452 BNX2X_ERR("NVRAM config error. "
9453 "BAD XGXS ext_phy_config 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009454 bp->link_params.ext_phy_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009455 return;
9456 }
9457
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009458 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9459 port*0x18);
9460 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009461
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009462 break;
9463
9464 default:
9465 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009466 bp->port.link_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009467 return;
9468 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009469 bp->link_params.phy_addr = bp->port.phy_addr;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009470
9471 /* mask what we support according to speed_cap_mask */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009472 if (!(bp->link_params.speed_cap_mask &
9473 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009474 bp->port.supported &= ~SUPPORTED_10baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009475
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009476 if (!(bp->link_params.speed_cap_mask &
9477 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009478 bp->port.supported &= ~SUPPORTED_10baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009479
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009480 if (!(bp->link_params.speed_cap_mask &
9481 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009482 bp->port.supported &= ~SUPPORTED_100baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009483
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009484 if (!(bp->link_params.speed_cap_mask &
9485 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009486 bp->port.supported &= ~SUPPORTED_100baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009487
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009488 if (!(bp->link_params.speed_cap_mask &
9489 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009490 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9491 SUPPORTED_1000baseT_Full);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009492
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009493 if (!(bp->link_params.speed_cap_mask &
9494 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009495 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009496
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009497 if (!(bp->link_params.speed_cap_mask &
9498 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009499 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009500
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009501 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009502}
9503
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009504static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009505{
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009506 bp->link_params.req_duplex = DUPLEX_FULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009507
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009508 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009509 case PORT_FEATURE_LINK_SPEED_AUTO:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009510 if (bp->port.supported & SUPPORTED_Autoneg) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009511 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009512 bp->port.advertising = bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009513 } else {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009514 u32 ext_phy_type =
9515 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9516
9517 if ((ext_phy_type ==
9518 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9519 (ext_phy_type ==
9520 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009521 /* force 10G, no AN */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009522 bp->link_params.req_line_speed = SPEED_10000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009523 bp->port.advertising =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009524 (ADVERTISED_10000baseT_Full |
9525 ADVERTISED_FIBRE);
9526 break;
9527 }
9528 BNX2X_ERR("NVRAM config error. "
9529 "Invalid link_config 0x%x"
9530 " Autoneg not supported\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009531 bp->port.link_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009532 return;
9533 }
9534 break;
9535
9536 case PORT_FEATURE_LINK_SPEED_10M_FULL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009537 if (bp->port.supported & SUPPORTED_10baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009538 bp->link_params.req_line_speed = SPEED_10;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009539 bp->port.advertising = (ADVERTISED_10baseT_Full |
9540 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009541 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009542 BNX2X_ERROR("NVRAM config error. "
9543 "Invalid link_config 0x%x"
9544 " speed_cap_mask 0x%x\n",
9545 bp->port.link_config,
9546 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009547 return;
9548 }
9549 break;
9550
9551 case PORT_FEATURE_LINK_SPEED_10M_HALF:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009552 if (bp->port.supported & SUPPORTED_10baseT_Half) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009553 bp->link_params.req_line_speed = SPEED_10;
9554 bp->link_params.req_duplex = DUPLEX_HALF;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009555 bp->port.advertising = (ADVERTISED_10baseT_Half |
9556 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009557 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009558 BNX2X_ERROR("NVRAM config error. "
9559 "Invalid link_config 0x%x"
9560 " speed_cap_mask 0x%x\n",
9561 bp->port.link_config,
9562 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009563 return;
9564 }
9565 break;
9566
9567 case PORT_FEATURE_LINK_SPEED_100M_FULL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009568 if (bp->port.supported & SUPPORTED_100baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009569 bp->link_params.req_line_speed = SPEED_100;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009570 bp->port.advertising = (ADVERTISED_100baseT_Full |
9571 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009572 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009573 BNX2X_ERROR("NVRAM config error. "
9574 "Invalid link_config 0x%x"
9575 " speed_cap_mask 0x%x\n",
9576 bp->port.link_config,
9577 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009578 return;
9579 }
9580 break;
9581
9582 case PORT_FEATURE_LINK_SPEED_100M_HALF:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009583 if (bp->port.supported & SUPPORTED_100baseT_Half) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009584 bp->link_params.req_line_speed = SPEED_100;
9585 bp->link_params.req_duplex = DUPLEX_HALF;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009586 bp->port.advertising = (ADVERTISED_100baseT_Half |
9587 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009588 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009589 BNX2X_ERROR("NVRAM config error. "
9590 "Invalid link_config 0x%x"
9591 " speed_cap_mask 0x%x\n",
9592 bp->port.link_config,
9593 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009594 return;
9595 }
9596 break;
9597
9598 case PORT_FEATURE_LINK_SPEED_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009599 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009600 bp->link_params.req_line_speed = SPEED_1000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009601 bp->port.advertising = (ADVERTISED_1000baseT_Full |
9602 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009603 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009604 BNX2X_ERROR("NVRAM config error. "
9605 "Invalid link_config 0x%x"
9606 " speed_cap_mask 0x%x\n",
9607 bp->port.link_config,
9608 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009609 return;
9610 }
9611 break;
9612
9613 case PORT_FEATURE_LINK_SPEED_2_5G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009614 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009615 bp->link_params.req_line_speed = SPEED_2500;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009616 bp->port.advertising = (ADVERTISED_2500baseX_Full |
9617 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009618 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009619 BNX2X_ERROR("NVRAM config error. "
9620 "Invalid link_config 0x%x"
9621 " speed_cap_mask 0x%x\n",
9622 bp->port.link_config,
9623 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009624 return;
9625 }
9626 break;
9627
9628 case PORT_FEATURE_LINK_SPEED_10G_CX4:
9629 case PORT_FEATURE_LINK_SPEED_10G_KX4:
9630 case PORT_FEATURE_LINK_SPEED_10G_KR:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009631 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009632 bp->link_params.req_line_speed = SPEED_10000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009633 bp->port.advertising = (ADVERTISED_10000baseT_Full |
9634 ADVERTISED_FIBRE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009635 } else {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009636 BNX2X_ERROR("NVRAM config error. "
9637 "Invalid link_config 0x%x"
9638 " speed_cap_mask 0x%x\n",
9639 bp->port.link_config,
9640 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009641 return;
9642 }
9643 break;
9644
9645 default:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009646 BNX2X_ERROR("NVRAM config error. "
9647 "BAD link speed link_config 0x%x\n",
9648 bp->port.link_config);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009649 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009650 bp->port.advertising = bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009651 break;
9652 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009653
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009654 bp->link_params.req_flow_ctrl = (bp->port.link_config &
9655 PORT_FEATURE_FLOW_CONTROL_MASK);
David S. Millerc0700f92008-12-16 23:53:20 -08009656 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
Randy Dunlap4ab84d42008-08-07 20:33:19 -07009657 !(bp->port.supported & SUPPORTED_Autoneg))
David S. Millerc0700f92008-12-16 23:53:20 -08009658 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009659
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009660 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
Eliezer Tamirf1410642008-02-28 11:51:50 -08009661 " advertising 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009662 bp->link_params.req_line_speed,
9663 bp->link_params.req_duplex,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009664 bp->link_params.req_flow_ctrl, bp->port.advertising);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009665}
9666
Michael Chane665bfd2009-10-10 13:46:54 +00009667static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9668{
9669 mac_hi = cpu_to_be16(mac_hi);
9670 mac_lo = cpu_to_be32(mac_lo);
9671 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9672 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9673}
9674
/* Read the per-port hardware configuration from shared memory (shmem)
 * into bp->link_params / bp->port, then derive the supported and
 * requested link settings and the port MAC address(es).
 *
 * Ordering matters: switch_cfg must be set before calling
 * bnx2x_link_settings_supported(), and bp->mdio.prtad is chosen only
 * after that call (which is where phy_addr gets populated for the
 * direct-connect case).
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current: normalize the PHY type
	 * to plain BCM8727 and remember the NOC variant in a feature flag.
	 */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx: each shmem word holds two
	 * 16-bit lane values (high half = even lane, low half = odd lane).
	 */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
	/* NOTE(review): for FAILURE/NOT_CONN, prtad is left untouched */

	/* primary MAC address from shmem (upper 16 + lower 32 bits) */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	/* dedicated iSCSI MAC for the CNIC (offload) interface */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009766
/* Gather chip- and function-level configuration: common hw info, E1H
 * multi-function (MF) mode detection, port hw info, firmware sequence
 * number and the function MAC address.
 *
 * Returns 0 on success or -EPERM when the E1H outer-VLAN (E1HOV)
 * configuration is inconsistent with the detected function mode.
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		/* MF mode is detected from function 0's E1HOV tag: a
		 * non-default tag there means the chip runs multi-function.
		 */
		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			/* in MF mode this function must have its own tag */
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERROR("No valid E1HOV for func %d,"
					    " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			/* single-function mode allows only VN 0 */
			if (BP_E1HVN(bp)) {
				BNX2X_ERROR("VN %d in single function mode,"
					    " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		/* driver/MCP mailbox sequence number */
		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		/* per-function MAC overrides the port MAC, but only when
		 * both halves carry a non-default value
		 */
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
9848
/* Read the PCI Vital Product Data (VPD) and, for Dell-branded boards
 * (MFR_ID keyword matches the Dell vendor ID in either hex case),
 * copy the vendor-specific V0 keyword into bp->fw_ver.
 *
 * bp->fw_ver is always zeroed first; on any parse failure the function
 * simply returns, leaving it empty. On success a trailing ' ' is
 * appended after the copied string (bp->fw_ver was pre-zeroed, so the
 * buffer stays NUL-terminated after the space).
 */
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	/* a short read means the VPD is unusable */
	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	/* locate the read-only (LRDT RO) data block */
	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;


	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	/* RO block must fit within the data we actually read */
	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
				   PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info: compare against the Dell vendor ID in
	 * both lower- and upper-case hex form
	 */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* bounded copy; 32 is the bp->fw_ver capacity limit
			 * used here — TODO confirm against the struct field
			 */
			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}
9912
/* One-time driver-state initialization at probe: locks, work items,
 * hw/fw info, and defaults derived from the module parameters
 * (multi_mode, int_mode, disable_tpa, dropless_fc, mrrs, poll).
 *
 * Returns the status of bnx2x_get_hwinfo(); note that initialization
 * deliberately continues even when that call fails — the rc is only
 * propagated at the end.
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	bnx2x_read_fwinfo(bp);
	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode: RSS requires MSI-X, so force it off when
	 * the user requested INTx or MSI interrupts
	 */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags: TPA and LRO are enabled/disabled together */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	/* dropless flow control is not available on E1 chips */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity:
	 * coalescing ticks are rounded down to a multiple of 4*BNX2X_BTR
	 */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	/* periodic timer: 5s on slow (emulation/FPGA) chips, 1s otherwise,
	 * unless overridden by the 'poll' module parameter
	 */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
9994
9995/*
9996 * ethtool service functions
9997 */
9998
9999/* All ethtool functions called with rtnl_lock */
10000
/* ethtool get_settings: report the current/requested link parameters.
 *
 * Speed/duplex come from link_vars when the device is open, the MF
 * function is enabled and link is up; otherwise both are reported as -1
 * (unknown). In E1H multi-function mode the reported speed is capped by
 * this function's configured max bandwidth. Always returns 0.
 */
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if ((bp->state == BNX2X_STATE_OPEN) &&
	    !(bp->flags & MF_FUNC_DIS) &&
	    (bp->link_vars.link_up)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			/* per-VN max bandwidth, in units of 100 Mbps */
			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < cmd->speed)
				cmd->speed = vn_max_rate;
		}
	} else {
		/* link down or function disabled: speed/duplex unknown */
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	/* port type is derived from the external PHY for 10G switch
	 * configurations, TP otherwise
	 */
	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		/* NOTE(review): the two arms below leave cmd->port
		 * unassigned (stale from the caller's buffer)
		 */
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	/* packet coalescing limits are not used by this driver */
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
10081
/* ethtool set_settings handler: apply the requested autoneg/speed/duplex
 * configuration to the link parameters and restart the link if the
 * interface is up.  Returns 0 on success or -EINVAL for unsupported
 * combinations.  In E1H multi-function mode the request is silently
 * ignored (link is managed per-port, not per-function). */
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported;
		 * each case validates the speed/duplex pair against the
		 * port capability mask before accepting it */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		/* 1G and above are full-duplex only on this hardware */
		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	/* restart link with the new parameters if the NIC is up */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
10232
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000010233#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10234#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10235
10236static int bnx2x_get_regs_len(struct net_device *dev)
10237{
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000010238 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein0d28e492009-08-12 08:23:40 +000010239 int regdump_len = 0;
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000010240 int i;
10241
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000010242 if (CHIP_IS_E1(bp)) {
10243 for (i = 0; i < REGS_COUNT; i++)
10244 if (IS_E1_ONLINE(reg_addrs[i].info))
10245 regdump_len += reg_addrs[i].size;
10246
10247 for (i = 0; i < WREGS_COUNT_E1; i++)
10248 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10249 regdump_len += wreg_addrs_e1[i].size *
10250 (1 + wreg_addrs_e1[i].read_regs_count);
10251
10252 } else { /* E1H */
10253 for (i = 0; i < REGS_COUNT; i++)
10254 if (IS_E1H_ONLINE(reg_addrs[i].info))
10255 regdump_len += reg_addrs[i].size;
10256
10257 for (i = 0; i < WREGS_COUNT_E1H; i++)
10258 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10259 regdump_len += wreg_addrs_e1h[i].size *
10260 (1 + wreg_addrs_e1h[i].read_regs_count);
10261 }
10262 regdump_len *= 4;
10263 regdump_len += sizeof(struct dump_hdr);
10264
10265 return regdump_len;
10266}
10267
10268static void bnx2x_get_regs(struct net_device *dev,
10269 struct ethtool_regs *regs, void *_p)
10270{
10271 u32 *p = _p, i, j;
10272 struct bnx2x *bp = netdev_priv(dev);
10273 struct dump_hdr dump_hdr = {0};
10274
10275 regs->version = 0;
10276 memset(p, 0, regs->len);
10277
10278 if (!netif_running(bp->dev))
10279 return;
10280
10281 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10282 dump_hdr.dump_sign = dump_sign_all;
10283 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10284 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10285 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10286 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10287 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10288
10289 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10290 p += dump_hdr.hdr_size + 1;
10291
10292 if (CHIP_IS_E1(bp)) {
10293 for (i = 0; i < REGS_COUNT; i++)
10294 if (IS_E1_ONLINE(reg_addrs[i].info))
10295 for (j = 0; j < reg_addrs[i].size; j++)
10296 *p++ = REG_RD(bp,
10297 reg_addrs[i].addr + j*4);
10298
10299 } else { /* E1H */
10300 for (i = 0; i < REGS_COUNT; i++)
10301 if (IS_E1H_ONLINE(reg_addrs[i].info))
10302 for (j = 0; j < reg_addrs[i].size; j++)
10303 *p++ = REG_RD(bp,
10304 reg_addrs[i].addr + j*4);
10305 }
10306}
10307
Eilon Greenstein0d28e492009-08-12 08:23:40 +000010308#define PHY_FW_VER_LEN 10
10309
10310static void bnx2x_get_drvinfo(struct net_device *dev,
10311 struct ethtool_drvinfo *info)
10312{
10313 struct bnx2x *bp = netdev_priv(dev);
10314 u8 phy_fw_ver[PHY_FW_VER_LEN];
10315
10316 strcpy(info->driver, DRV_MODULE_NAME);
10317 strcpy(info->version, DRV_MODULE_VERSION);
10318
10319 phy_fw_ver[0] = '\0';
10320 if (bp->port.pmf) {
10321 bnx2x_acquire_phy_lock(bp);
10322 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10323 (bp->state != BNX2X_STATE_CLOSED),
10324 phy_fw_ver, PHY_FW_VER_LEN);
10325 bnx2x_release_phy_lock(bp);
10326 }
10327
Vladislav Zolotarov34f24c72010-04-19 01:13:23 +000010328 strncpy(info->fw_version, bp->fw_ver, 32);
10329 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10330 "bc %d.%d.%d%s%s",
Eilon Greenstein0d28e492009-08-12 08:23:40 +000010331 (bp->common.bc_ver & 0xff0000) >> 16,
10332 (bp->common.bc_ver & 0xff00) >> 8,
10333 (bp->common.bc_ver & 0xff),
Vladislav Zolotarov34f24c72010-04-19 01:13:23 +000010334 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
Eilon Greenstein0d28e492009-08-12 08:23:40 +000010335 strcpy(info->bus_info, pci_name(bp->pdev));
10336 info->n_stats = BNX2X_NUM_STATS;
10337 info->testinfo_len = BNX2X_NUM_TESTS;
10338 info->eedump_len = bp->common.flash_size;
10339 info->regdump_len = bnx2x_get_regs_len(dev);
10340}
10341
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010342static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10343{
10344 struct bnx2x *bp = netdev_priv(dev);
10345
10346 if (bp->flags & NO_WOL_FLAG) {
10347 wol->supported = 0;
10348 wol->wolopts = 0;
10349 } else {
10350 wol->supported = WAKE_MAGIC;
10351 if (bp->wol)
10352 wol->wolopts = WAKE_MAGIC;
10353 else
10354 wol->wolopts = 0;
10355 }
10356 memset(&wol->sopass, 0, sizeof(wol->sopass));
10357}
10358
10359static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10360{
10361 struct bnx2x *bp = netdev_priv(dev);
10362
10363 if (wol->wolopts & ~WAKE_MAGIC)
10364 return -EINVAL;
10365
10366 if (wol->wolopts & WAKE_MAGIC) {
10367 if (bp->flags & NO_WOL_FLAG)
10368 return -EINVAL;
10369
10370 bp->wol = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010371 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010372 bp->wol = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010373
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010374 return 0;
10375}
10376
10377static u32 bnx2x_get_msglevel(struct net_device *dev)
10378{
10379 struct bnx2x *bp = netdev_priv(dev);
10380
Joe Perches7995c642010-02-17 15:01:52 +000010381 return bp->msg_enable;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010382}
10383
10384static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10385{
10386 struct bnx2x *bp = netdev_priv(dev);
10387
10388 if (capable(CAP_NET_ADMIN))
Joe Perches7995c642010-02-17 15:01:52 +000010389 bp->msg_enable = level;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010390}
10391
10392static int bnx2x_nway_reset(struct net_device *dev)
10393{
10394 struct bnx2x *bp = netdev_priv(dev);
10395
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010396 if (!bp->port.pmf)
10397 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010398
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010399 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010400 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010401 bnx2x_link_set(bp);
10402 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010403
10404 return 0;
10405}
10406
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000010407static u32 bnx2x_get_link(struct net_device *dev)
Naohiro Ooiwa01e53292009-06-30 12:44:19 -070010408{
10409 struct bnx2x *bp = netdev_priv(dev);
10410
Eilon Greensteinf34d28e2009-10-15 00:18:08 -070010411 if (bp->flags & MF_FUNC_DIS)
10412 return 0;
10413
Naohiro Ooiwa01e53292009-06-30 12:44:19 -070010414 return bp->link_vars.link_up;
10415}
10416
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010417static int bnx2x_get_eeprom_len(struct net_device *dev)
10418{
10419 struct bnx2x *bp = netdev_priv(dev);
10420
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010421 return bp->common.flash_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010422}
10423
10424static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10425{
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010426 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010427 int count, i;
10428 u32 val = 0;
10429
10430 /* adjust timeout for emulation/FPGA */
10431 count = NVRAM_TIMEOUT_COUNT;
10432 if (CHIP_REV_IS_SLOW(bp))
10433 count *= 100;
10434
10435 /* request access to nvram interface */
10436 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10437 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10438
10439 for (i = 0; i < count*10; i++) {
10440 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10441 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10442 break;
10443
10444 udelay(5);
10445 }
10446
10447 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010448 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010449 return -EBUSY;
10450 }
10451
10452 return 0;
10453}
10454
10455static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10456{
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010457 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010458 int count, i;
10459 u32 val = 0;
10460
10461 /* adjust timeout for emulation/FPGA */
10462 count = NVRAM_TIMEOUT_COUNT;
10463 if (CHIP_REV_IS_SLOW(bp))
10464 count *= 100;
10465
10466 /* relinquish nvram interface */
10467 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10468 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10469
10470 for (i = 0; i < count*10; i++) {
10471 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10472 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10473 break;
10474
10475 udelay(5);
10476 }
10477
10478 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010479 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010480 return -EBUSY;
10481 }
10482
10483 return 0;
10484}
10485
10486static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10487{
10488 u32 val;
10489
10490 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10491
10492 /* enable both bits, even on read */
10493 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10494 (val | MCPR_NVM_ACCESS_ENABLE_EN |
10495 MCPR_NVM_ACCESS_ENABLE_WR_EN));
10496}
10497
10498static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10499{
10500 u32 val;
10501
10502 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10503
10504 /* disable both bits, even after read */
10505 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10506 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10507 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10508}
10509
/* Read one 32-bit word from NVRAM at @offset into @ret_val.
 * @cmd_flags carries the MCPR_NVM_COMMAND_FIRST/LAST burst-sequencing
 * bits; the DOIT bit is added here.  Returns 0 on success or -EBUSY if
 * the controller does not signal DONE within the timeout.  Caller must
 * hold the NVRAM lock and have NVRAM access enabled. */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion; poll DONE with a 5us delay per iteration */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
10554
/* Read @buf_size bytes of NVRAM starting at @offset into @ret_buf.
 * Both @offset and @buf_size must be dword-aligned and non-zero, and
 * the range must fit within the flash.  The transfer is bracketed with
 * MCPR_NVM_COMMAND_FIRST on the first dword and LAST on the final one
 * so the controller treats it as a single burst.  Returns 0 on success
 * or a negative errno. */
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s); everything except the last dword */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	/* final dword closes the burst with the LAST flag */
	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
10609
10610static int bnx2x_get_eeprom(struct net_device *dev,
10611 struct ethtool_eeprom *eeprom, u8 *eebuf)
10612{
10613 struct bnx2x *bp = netdev_priv(dev);
10614 int rc;
10615
Eilon Greenstein2add3ac2009-01-14 06:44:07 +000010616 if (!netif_running(dev))
10617 return -EAGAIN;
10618
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010619 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010620 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10621 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10622 eeprom->len, eeprom->len);
10623
10624 /* parameters already validated in ethtool_get_eeprom */
10625
10626 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10627
10628 return rc;
10629}
10630
/* Write one 32-bit word @val to NVRAM at @offset.
 * @cmd_flags carries the MCPR_NVM_COMMAND_FIRST/LAST burst-sequencing
 * bits; DOIT and WR are added here.  Returns 0 on success or -EBUSY if
 * the controller does not signal DONE within the timeout.  Caller must
 * hold the NVRAM lock and have NVRAM access enabled. */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion; val is reused below as a scratch register */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
10670
Eliezer Tamirf1410642008-02-28 11:51:50 -080010671#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010672
/* Write a single byte (*data_buf) to NVRAM at @offset via a
 * read-modify-write of the enclosing aligned dword: read the dword,
 * splice the byte in, write it back as one FIRST|LAST burst.
 * Used by the ethtool path for 1-byte eeprom writes. */
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		/* NOTE(review): the mask/insert is applied while val still
		 * holds the big-endian "byte array" form returned by
		 * bnx2x_nvram_read_dword(), before the be32_to_cpu() below;
		 * assumes BYTE_OFFSET() then selects the byte at
		 * (offset & 3) of the array — TODO confirm on big-endian */
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
10718
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 * A single-byte write is delegated to bnx2x_nvram_write1(); any other
 * size must be dword-aligned and fit within the flash.  The burst is
 * re-bracketed with FIRST/LAST flags at NVRAM page boundaries.
 * Returns 0 on success or a negative errno. */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1) /* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* LAST on the final dword, LAST again when the next dword
		 * crosses an NVRAM page end, FIRST when restarting a burst
		 * at a page start */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
10779
/* ethtool set_eeprom handler.  Besides plain NVRAM writes, three magic
 * values drive an external-PHY firmware upgrade protocol:
 *   'PHYP' - take the link down and prepare the PHY for FW download;
 *   'PHYR' - bring the link back up after the download;
 *   the third magic finalizes the upgrade on SFX7101 PHYs.
 * PHY-related magics (0x504859xx) are restricted to the PMF.
 * rc accumulates link-API error bits via |= across the sub-steps. */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		/* NOTE(review): 0x53985943 does not actually spell "PHYC"
		 * and lies outside the 0x504859xx PMF-guard range above —
		 * verify the intended magic with the FW upgrade tool */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
10854
10855static int bnx2x_get_coalesce(struct net_device *dev,
10856 struct ethtool_coalesce *coal)
10857{
10858 struct bnx2x *bp = netdev_priv(dev);
10859
10860 memset(coal, 0, sizeof(struct ethtool_coalesce));
10861
10862 coal->rx_coalesce_usecs = bp->rx_ticks;
10863 coal->tx_coalesce_usecs = bp->tx_ticks;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010864
10865 return 0;
10866}
10867
10868static int bnx2x_set_coalesce(struct net_device *dev,
10869 struct ethtool_coalesce *coal)
10870{
10871 struct bnx2x *bp = netdev_priv(dev);
10872
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000010873 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
10874 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
10875 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010876
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000010877 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
10878 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
10879 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010880
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010881 if (netif_running(dev))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010882 bnx2x_update_coalesce(bp);
10883
10884 return 0;
10885}
10886
10887static void bnx2x_get_ringparam(struct net_device *dev,
10888 struct ethtool_ringparam *ering)
10889{
10890 struct bnx2x *bp = netdev_priv(dev);
10891
10892 ering->rx_max_pending = MAX_RX_AVAIL;
10893 ering->rx_mini_max_pending = 0;
10894 ering->rx_jumbo_max_pending = 0;
10895
10896 ering->rx_pending = bp->rx_ring_size;
10897 ering->rx_mini_pending = 0;
10898 ering->rx_jumbo_pending = 0;
10899
10900 ering->tx_max_pending = MAX_TX_AVAIL;
10901 ering->tx_pending = bp->tx_ring_size;
10902}
10903
10904static int bnx2x_set_ringparam(struct net_device *dev,
10905 struct ethtool_ringparam *ering)
10906{
10907 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010908 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010909
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +000010910 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10911 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10912 return -EAGAIN;
10913 }
10914
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010915 if ((ering->rx_pending > MAX_RX_AVAIL) ||
10916 (ering->tx_pending > MAX_TX_AVAIL) ||
10917 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10918 return -EINVAL;
10919
10920 bp->rx_ring_size = ering->rx_pending;
10921 bp->tx_ring_size = ering->tx_pending;
10922
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010923 if (netif_running(dev)) {
10924 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10925 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010926 }
10927
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010928 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010929}
10930
10931static void bnx2x_get_pauseparam(struct net_device *dev,
10932 struct ethtool_pauseparam *epause)
10933{
10934 struct bnx2x *bp = netdev_priv(dev);
10935
Eilon Greenstein356e2382009-02-12 08:38:32 +000010936 epause->autoneg = (bp->link_params.req_flow_ctrl ==
10937 BNX2X_FLOW_CTRL_AUTO) &&
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010938 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10939
David S. Millerc0700f92008-12-16 23:53:20 -080010940 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10941 BNX2X_FLOW_CTRL_RX);
10942 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10943 BNX2X_FLOW_CTRL_TX);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010944
10945 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10946 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10947 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10948}
10949
10950static int bnx2x_set_pauseparam(struct net_device *dev,
10951 struct ethtool_pauseparam *epause)
10952{
10953 struct bnx2x *bp = netdev_priv(dev);
10954
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010955 if (IS_E1HMF(bp))
10956 return 0;
10957
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010958 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10959 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10960 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10961
David S. Millerc0700f92008-12-16 23:53:20 -080010962 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010963
10964 if (epause->rx_pause)
David S. Millerc0700f92008-12-16 23:53:20 -080010965 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010966
10967 if (epause->tx_pause)
David S. Millerc0700f92008-12-16 23:53:20 -080010968 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010969
David S. Millerc0700f92008-12-16 23:53:20 -080010970 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10971 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010972
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010973 if (epause->autoneg) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010974 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -070010975 DP(NETIF_MSG_LINK, "autoneg not supported\n");
Eliezer Tamirf1410642008-02-28 11:51:50 -080010976 return -EINVAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010977 }
10978
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010979 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
David S. Millerc0700f92008-12-16 23:53:20 -080010980 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010981 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010982
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010983 DP(NETIF_MSG_LINK,
10984 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010985
10986 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010987 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010988 bnx2x_link_set(bp);
10989 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010990
10991 return 0;
10992}
10993
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -070010994static int bnx2x_set_flags(struct net_device *dev, u32 data)
10995{
10996 struct bnx2x *bp = netdev_priv(dev);
10997 int changed = 0;
10998 int rc = 0;
10999
Stanislaw Gruszkae0d904f2010-06-27 23:28:11 +000011000 if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
Ben Hutchings97d19352010-06-30 02:46:56 +000011001 return -EINVAL;
Stanislaw Gruszkae0d904f2010-06-27 23:28:11 +000011002
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +000011003 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11004 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11005 return -EAGAIN;
11006 }
11007
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -070011008 /* TPA requires Rx CSUM offloading */
11009 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
Dmitry Kravkov5d7cd492010-07-27 12:32:19 +000011010 if (!bp->disable_tpa) {
Vladislav Zolotarovd43a7e62010-02-17 02:03:40 +000011011 if (!(dev->features & NETIF_F_LRO)) {
11012 dev->features |= NETIF_F_LRO;
11013 bp->flags |= TPA_ENABLE_FLAG;
11014 changed = 1;
11015 }
11016 } else
11017 rc = -EINVAL;
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -070011018 } else if (dev->features & NETIF_F_LRO) {
11019 dev->features &= ~NETIF_F_LRO;
11020 bp->flags &= ~TPA_ENABLE_FLAG;
11021 changed = 1;
11022 }
11023
Tom Herbertc68ed252010-04-23 00:10:52 -070011024 if (data & ETH_FLAG_RXHASH)
11025 dev->features |= NETIF_F_RXHASH;
11026 else
11027 dev->features &= ~NETIF_F_RXHASH;
11028
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -070011029 if (changed && netif_running(dev)) {
11030 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11031 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11032 }
11033
11034 return rc;
11035}
11036
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011037static u32 bnx2x_get_rx_csum(struct net_device *dev)
11038{
11039 struct bnx2x *bp = netdev_priv(dev);
11040
11041 return bp->rx_csum;
11042}
11043
11044static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
11045{
11046 struct bnx2x *bp = netdev_priv(dev);
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -070011047 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011048
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +000011049 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11050 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11051 return -EAGAIN;
11052 }
11053
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011054 bp->rx_csum = data;
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -070011055
11056 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
11057 TPA'ed packets will be discarded due to wrong TCP CSUM */
11058 if (!data) {
11059 u32 flags = ethtool_op_get_flags(dev);
11060
11061 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
11062 }
11063
11064 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011065}
11066
11067static int bnx2x_set_tso(struct net_device *dev, u32 data)
11068{
Eilon Greenstein755735e2008-06-23 20:35:13 -070011069 if (data) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011070 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011071 dev->features |= NETIF_F_TSO6;
11072 } else {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011073 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011074 dev->features &= ~NETIF_F_TSO6;
11075 }
11076
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011077 return 0;
11078}
11079
/* Self-test names reported to ethtool; the order must match the buf[]
 * indices filled in by bnx2x_self_test() (registers, memory, loopback,
 * nvram, interrupt, link, idle).
 */
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
11091
/* Offline register test: for each entry in reg_tbl, write a test pattern
 * (all zeros, then all ones) masked to the register's writable bits,
 * read it back, restore the original value, and fail if the read-back
 * does not match.  offset1 is the per-port stride added for port 1.
 * Returns 0 on success, -ENODEV on mismatch or if the device is down.
 */
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;	/* base register address */
		u32 offset1;	/* per-port stride (offset0 + port*offset1) */
		u32 mask;	/* writable/compared bits */
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }	/* table terminator */
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			/* save, write pattern, read back */
			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, (wr_val & mask));
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify value is as expected */
			if ((val & mask) != (wr_val & mask)) {
				DP(NETIF_MSG_PROBE,
				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
				   offset, val, wr_val, mask);
				goto test_reg_exit;
			}
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
11188
/* Offline memory test: read every word of each internal memory in
 * mem_tbl (reads alone can trigger parity detection), then check the
 * parity status registers against per-chip "don't care" masks
 * (e1_mask for 57710, e1h_mask for 57711).  Returns 0 if no unexpected
 * parity bits are set, -ENODEV otherwise or if the device is down.
 */
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }	/* table terminator */
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;	/* bits ignored on E1 (57710) */
		u32 e1h_mask;	/* bits ignored on E1H (57711) */
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }	/* table terminator */
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
11247
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011248static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11249{
11250 int cnt = 1000;
11251
11252 if (link_up)
11253 while (bnx2x_link_test(bp) && cnt--)
11254 msleep(10);
11255}
11256
/* Run one loopback iteration: transmit a single self-addressed test frame
 * on queue 0 and verify it comes back intact on the Rx side.
 *
 * @loopback_mode: BNX2X_PHY_LOOPBACK (requires LOOPBACK_XGXS_10 to be
 *                 pre-configured) or BNX2X_MAC_LOOPBACK (re-inits the PHY
 *                 in BMAC loopback here).
 * @link_up:       link state hint from the caller (currently unused in
 *                 the body itself).
 *
 * Returns 0 on success, -EINVAL for an unsupported mode, -ENOMEM if the
 * skb allocation fails, and -ENODEV when the frame is not seen back or
 * its contents are corrupted.  Caller must have stopped the fastpath
 * (bnx2x_netif_stop) and hold the PHY lock.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet: dst MAC = our own address,
	 * src MAC zeroed, rest of the header filled with 0x77, payload
	 * is a repeating byte ramp used for verification below
	 */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	/* build the start BD describing the mapped frame */
	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	/* make sure the BDs are written before ringing the doorbell */
	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	/* give the chip time to loop the frame back */
	udelay(100);

	/* the Tx consumer must have advanced by exactly one packet */
	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* and one completion must have arrived on the Rx side */
	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* verify the CQE is a fast-path completion without errors */
	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* verify the payload ramp survived the round trip */
	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* advance the Rx ring past the consumed BD/CQE */
	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
11391
11392static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11393{
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000011394 int rc = 0, res;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011395
Vladislav Zolotarov2145a922010-04-19 01:13:49 +000011396 if (BP_NOMCP(bp))
11397 return rc;
11398
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011399 if (!netif_running(bp->dev))
11400 return BNX2X_LOOPBACK_FAILED;
11401
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070011402 bnx2x_netif_stop(bp, 1);
Eilon Greenstein3910c8a2009-01-22 06:01:32 +000011403 bnx2x_acquire_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011404
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000011405 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11406 if (res) {
11407 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
11408 rc |= BNX2X_PHY_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011409 }
11410
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000011411 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11412 if (res) {
11413 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
11414 rc |= BNX2X_MAC_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011415 }
11416
Eilon Greenstein3910c8a2009-01-22 06:01:32 +000011417 bnx2x_release_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011418 bnx2x_netif_start(bp);
11419
11420 return rc;
11421}
11422
11423#define CRC32_RESIDUAL 0xdebb20e3
11424
/* NVRAM self-test: verify the magic word at offset 0, then CRC-check each
 * region listed in nvram_tbl — a valid region's little-endian Ethernet
 * CRC over its whole length must equal the CRC32 residual.
 * Returns 0 on success (or when there is no management CPU), a read error
 * code, or -ENODEV on bad magic/CRC.
 */
static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }  /* size 0 terminates the table */
	};
	__be32 buf[0x350 / 4];	/* sized for the largest region above */
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	if (BP_NOMCP(bp))
		return 0;

	/* read and validate the NVRAM magic word */
	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		/* CRC over data+stored CRC must leave the residual */
		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
11484
/* Interrupt self-test: post a zero-length SET_MAC ramrod on the slowpath
 * and wait (up to 10 * 10ms) for its completion to clear set_mac_pending,
 * proving the interrupt/completion path works.
 * Returns 0 on success, -ENODEV if the device is down or the completion
 * never arrives, or the bnx2x_sp_post() error code.
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* zero-length command: exercises the path without changing MACs */
	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* flag must be set (and visible) before the ramrod is posted */
	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		/* poll for the completion handler to clear the flag */
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
11520
/* ethtool self_test entry point.  Fills buf[] (one slot per entry of
 * bnx2x_tests_str_arr) with per-test results: 0 = pass, non-zero = fail.
 * Offline tests (registers, memory, loopback) reload the NIC in DIAG mode
 * and temporarily disable the NIG TX-port input enable; they are skipped
 * in multi-function mode.  Online tests (nvram, interrupt, link) always
 * run; the link test only when this function is the port master (pmf).
 */
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* no testing while parity error recovery is in progress */
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		/* remember link state, then reload the NIC in DIAG mode */
		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* buf[2] carries the loopback failure bitmask directly */
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
11596
/* Per-queue statistics descriptors for ethtool: offset into the queue
 * stats structure, field width in bytes (8 = hi/lo pair, 4 = single u32),
 * and the name template ("%d" is replaced by the queue index).
 */
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
						8, "[%d]: tx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
						8, "[%d]: tx_bcast_packets" }
};
11626
/* Port/function statistics exported via "ethtool -S".  Each entry holds the
 * dword offset of the counter (via STATS_OFFSET32, presumably into the
 * driver's eth_stats block -- confirm against the macro definition), the
 * counter width in bytes (4 or 8; 8-byte counters are stored as hi/lo dword
 * pairs) and flags telling whether the counter is maintained per port, per
 * function or both.  Port-only counters are filtered out in E1H
 * multi-function mode (see IS_E1HMF_MODE_STAT users below).
 */
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
				4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
			8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
11724
/* Classification helpers over bnx2x_stats_arr[i].flags */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
/* In E1H multi-function mode only function stats are exported, unless the
 * BNX2X_MSG_STATS debug flag is set in bp->msg_enable */
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
Yitchak Gertner66e855f2008-08-13 15:49:05 -070011730
Ben Hutchings15f0a392009-10-01 11:58:24 +000011731static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11732{
11733 struct bnx2x *bp = netdev_priv(dev);
11734 int i, num_stats;
11735
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000011736 switch (stringset) {
Ben Hutchings15f0a392009-10-01 11:58:24 +000011737 case ETH_SS_STATS:
11738 if (is_multi(bp)) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011739 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
Ben Hutchings15f0a392009-10-01 11:58:24 +000011740 if (!IS_E1HMF_MODE_STAT(bp))
11741 num_stats += BNX2X_NUM_STATS;
11742 } else {
11743 if (IS_E1HMF_MODE_STAT(bp)) {
11744 num_stats = 0;
11745 for (i = 0; i < BNX2X_NUM_STATS; i++)
11746 if (IS_FUNC_STAT(i))
11747 num_stats++;
11748 } else
11749 num_stats = BNX2X_NUM_STATS;
11750 }
11751 return num_stats;
11752
11753 case ETH_SS_TEST:
11754 return BNX2X_NUM_TESTS;
11755
11756 default:
11757 return -EINVAL;
11758 }
11759}
11760
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011761static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11762{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011763 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +000011764 int i, j, k;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011765
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011766 switch (stringset) {
11767 case ETH_SS_STATS:
Eilon Greensteinde832a52009-02-12 08:36:33 +000011768 if (is_multi(bp)) {
11769 k = 0;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011770 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +000011771 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11772 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11773 bnx2x_q_stats_arr[j].string, i);
11774 k += BNX2X_NUM_Q_STATS;
11775 }
11776 if (IS_E1HMF_MODE_STAT(bp))
11777 break;
11778 for (j = 0; j < BNX2X_NUM_STATS; j++)
11779 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11780 bnx2x_stats_arr[j].string);
11781 } else {
11782 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11783 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11784 continue;
11785 strcpy(buf + j*ETH_GSTRING_LEN,
11786 bnx2x_stats_arr[i].string);
11787 j++;
11788 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011789 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011790 break;
11791
11792 case ETH_SS_TEST:
11793 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11794 break;
11795 }
11796}
11797
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011798static void bnx2x_get_ethtool_stats(struct net_device *dev,
11799 struct ethtool_stats *stats, u64 *buf)
11800{
11801 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +000011802 u32 *hw_stats, *offset;
11803 int i, j, k;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011804
Eilon Greensteinde832a52009-02-12 08:36:33 +000011805 if (is_multi(bp)) {
11806 k = 0;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011807 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +000011808 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11809 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11810 if (bnx2x_q_stats_arr[j].size == 0) {
11811 /* skip this counter */
11812 buf[k + j] = 0;
11813 continue;
11814 }
11815 offset = (hw_stats +
11816 bnx2x_q_stats_arr[j].offset);
11817 if (bnx2x_q_stats_arr[j].size == 4) {
11818 /* 4-byte counter */
11819 buf[k + j] = (u64) *offset;
11820 continue;
11821 }
11822 /* 8-byte counter */
11823 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11824 }
11825 k += BNX2X_NUM_Q_STATS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011826 }
Eilon Greensteinde832a52009-02-12 08:36:33 +000011827 if (IS_E1HMF_MODE_STAT(bp))
11828 return;
11829 hw_stats = (u32 *)&bp->eth_stats;
11830 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11831 if (bnx2x_stats_arr[j].size == 0) {
11832 /* skip this counter */
11833 buf[k + j] = 0;
11834 continue;
11835 }
11836 offset = (hw_stats + bnx2x_stats_arr[j].offset);
11837 if (bnx2x_stats_arr[j].size == 4) {
11838 /* 4-byte counter */
11839 buf[k + j] = (u64) *offset;
11840 continue;
11841 }
11842 /* 8-byte counter */
11843 buf[k + j] = HILO_U64(*offset, *(offset + 1));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011844 }
Eilon Greensteinde832a52009-02-12 08:36:33 +000011845 } else {
11846 hw_stats = (u32 *)&bp->eth_stats;
11847 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11848 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11849 continue;
11850 if (bnx2x_stats_arr[i].size == 0) {
11851 /* skip this counter */
11852 buf[j] = 0;
11853 j++;
11854 continue;
11855 }
11856 offset = (hw_stats + bnx2x_stats_arr[i].offset);
11857 if (bnx2x_stats_arr[i].size == 4) {
11858 /* 4-byte counter */
11859 buf[j] = (u64) *offset;
11860 j++;
11861 continue;
11862 }
11863 /* 8-byte counter */
11864 buf[j] = HILO_U64(*offset, *(offset + 1));
11865 j++;
11866 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011867 }
11868}
11869
11870static int bnx2x_phys_id(struct net_device *dev, u32 data)
11871{
11872 struct bnx2x *bp = netdev_priv(dev);
11873 int i;
11874
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011875 if (!netif_running(dev))
11876 return 0;
11877
11878 if (!bp->port.pmf)
11879 return 0;
11880
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011881 if (data == 0)
11882 data = 2;
11883
11884 for (i = 0; i < (data * 2); i++) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011885 if ((i % 2) == 0)
Yaniv Rosner7846e472009-11-05 19:18:07 +020011886 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11887 SPEED_1000);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011888 else
Yaniv Rosner7846e472009-11-05 19:18:07 +020011889 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011890
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011891 msleep_interruptible(500);
11892 if (signal_pending(current))
11893 break;
11894 }
11895
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011896 if (bp->link_vars.link_up)
Yaniv Rosner7846e472009-11-05 19:18:07 +020011897 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11898 bp->link_vars.line_speed);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011899
11900 return 0;
11901}
11902
/* ethtool entry points for the bnx2x driver; generic ethtool_op_*
 * helpers are used where no device-specific handling is needed */
static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};
11940
11941/* end of ethtool_ops */
11942
11943/****************************************************************************
11944* General service functions
11945****************************************************************************/
11946
/* Move the device between PCI power states by rewriting the PM control
 * register.  Only D0 and D3hot are supported; returns 0 on success or
 * -EINVAL for any other state.
 */
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		/* clear the power-state field (back to D0) and ack any
		 * pending PME status (write-one-to-clear) */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		/* 3 is the D3hot encoding of the PM_CTRL state field */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		/* keep PME armed if Wake-on-LAN is configured */
		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
11992
Eilon Greenstein237907c2009-01-14 06:42:44 +000011993static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11994{
11995 u16 rx_cons_sb;
11996
11997 /* Tell compiler that status block fields can change */
11998 barrier();
11999 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
12000 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
12001 rx_cons_sb++;
12002 return (fp->rx_comp_cons != rx_cons_sb);
12003}
12004
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012005/*
12006 * net_device service functions
12007 */
12008
/* NAPI poll handler for one fastpath: service TX completions and RX work
 * until either the budget is consumed or no work remains, in which case
 * NAPI is completed and the queue's interrupts are re-enabled via the IGU.
 * Returns the number of RX packets processed.
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		/* TX completions are not budgeted */
		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, thus we need
			 * to ensure that status block indices have been actually read
			 * (bnx2x_update_fpsb_idx) prior to this check
			 * (bnx2x_has_rx_work) so that we won't write the "newer"
			 * value of the status block to IGU (if there was a DMA right
			 * after bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed to right
			 * before bnx2x_ack_sb). In this case there will never be
			 * another interrupt until there is another update of the
			 * status block, while there is still unhandled work.
			 */
			rmb();

			/* re-check after the barrier before completing */
			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
12067
Eilon Greenstein755735e2008-06-23 20:35:13 -070012068
12069/* we split the first BD into headers and data BDs
Eilon Greenstein33471622008-08-13 15:59:08 -070012070 * to ease the pain of our fellow microcode engineers
Eilon Greenstein755735e2008-06-23 20:35:13 -070012071 * we use one mapping for both BDs
12072 * So far this has only been observed to happen
12073 * in Other Operating Systems(TM)
12074 */
/* Split the first (start) BD of a TSO packet into a headers BD of @hlen
 * bytes and a data BD covering the remainder, reusing the original DMA
 * mapping for both.  Returns the BD producer index after the new data BD;
 * *tx_bd is advanced to point at the newly filled data BD.
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	/* data BD points into the same DMA mapping, advanced past the
	 * headers */
	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
12118
12119static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12120{
12121 if (fix > 0)
12122 csum = (u16) ~csum_fold(csum_sub(csum,
12123 csum_partial(t_header - fix, fix, 0)));
12124
12125 else if (fix < 0)
12126 csum = (u16) ~csum_fold(csum_add(csum,
12127 csum_partial(t_header, -fix, 0)));
12128
12129 return swab16(csum);
12130}
12131
12132static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12133{
12134 u32 rc;
12135
12136 if (skb->ip_summed != CHECKSUM_PARTIAL)
12137 rc = XMIT_PLAIN;
12138
12139 else {
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000012140 if (skb->protocol == htons(ETH_P_IPV6)) {
Eilon Greenstein755735e2008-06-23 20:35:13 -070012141 rc = XMIT_CSUM_V6;
12142 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12143 rc |= XMIT_CSUM_TCP;
12144
12145 } else {
12146 rc = XMIT_CSUM_V4;
12147 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12148 rc |= XMIT_CSUM_TCP;
12149 }
12150 }
12151
12152 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
Eilon Greensteind6a2f982009-11-09 06:09:22 +000012153 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
Eilon Greenstein755735e2008-06-23 20:35:13 -070012154
12155 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
Eilon Greensteind6a2f982009-11-09 06:09:22 +000012156 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
Eilon Greenstein755735e2008-06-23 20:35:13 -070012157
12158 return rc;
12159}
12160
Eilon Greenstein632da4d2009-01-14 06:44:10 +000012161#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
Eilon Greensteinf5372252009-02-12 08:38:30 +000012162/* check if packet requires linearization (packet is too fragmented)
12163 no need to check fragmentation if page size > 8K (there will be no
12164 violation to FW restrictions) */
/* Return 1 when the skb is too fragmented for the firmware and must be
 * linearized: for LSO packets, slide a window of (MAX_FETCH_BD - 3) BDs
 * over the fragments and fail if any window carries less than one MSS;
 * for non-LSO packets, any skb exceeding the BD limit must be copied.
 */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				/* slide the window one fragment forward */
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
Eilon Greenstein632da4d2009-01-14 06:44:10 +000012240#endif
Eilon Greenstein755735e2008-06-23 20:35:13 -070012241
12242/* called with netif_tx_lock
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012243 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
Eilon Greenstein755735e2008-06-23 20:35:13 -070012244 * netif_wake_queue()
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012245 */
Stephen Hemminger613573252009-08-31 19:50:58 +000012246static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012247{
12248 struct bnx2x *bp = netdev_priv(dev);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000012249 struct bnx2x_fastpath *fp;
Eilon Greenstein555f6c72009-02-12 08:36:11 +000012250 struct netdev_queue *txq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012251 struct sw_tx_bd *tx_buf;
Eilon Greensteinca003922009-08-12 22:53:28 -070012252 struct eth_tx_start_bd *tx_start_bd;
12253 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012254 struct eth_tx_parse_bd *pbd = NULL;
12255 u16 pkt_prod, bd_prod;
Eilon Greenstein755735e2008-06-23 20:35:13 -070012256 int nbd, fp_index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012257 dma_addr_t mapping;
Eilon Greenstein755735e2008-06-23 20:35:13 -070012258 u32 xmit_type = bnx2x_xmit_type(bp, skb);
Eilon Greenstein755735e2008-06-23 20:35:13 -070012259 int i;
12260 u8 hlen = 0;
Eilon Greensteinca003922009-08-12 22:53:28 -070012261 __le16 pkt_size = 0;
Vladislav Zolotarovdea7aab2010-04-19 01:14:07 +000012262 struct ethhdr *eth;
12263 u8 mac_type = UNICAST_ADDRESS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012264
12265#ifdef BNX2X_STOP_ON_ERROR
12266 if (unlikely(bp->panic))
12267 return NETDEV_TX_BUSY;
12268#endif
12269
Eilon Greenstein555f6c72009-02-12 08:36:11 +000012270 fp_index = skb_get_queue_mapping(skb);
12271 txq = netdev_get_tx_queue(dev, fp_index);
12272
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000012273 fp = &bp->fp[fp_index];
Eilon Greenstein755735e2008-06-23 20:35:13 -070012274
Yitchak Gertner231fd582008-08-25 15:27:06 -070012275 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000012276 fp->eth_q_stats.driver_xoff++;
Eilon Greenstein555f6c72009-02-12 08:36:11 +000012277 netif_tx_stop_queue(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012278 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12279 return NETDEV_TX_BUSY;
12280 }
12281
Eilon Greenstein755735e2008-06-23 20:35:13 -070012282 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
12283 " gso type %x xmit_type %x\n",
12284 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12285 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12286
Vladislav Zolotarovdea7aab2010-04-19 01:14:07 +000012287 eth = (struct ethhdr *)skb->data;
12288
12289 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
12290 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12291 if (is_broadcast_ether_addr(eth->h_dest))
12292 mac_type = BROADCAST_ADDRESS;
12293 else
12294 mac_type = MULTICAST_ADDRESS;
12295 }
12296
Eilon Greenstein632da4d2009-01-14 06:44:10 +000012297#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
Eilon Greensteinf5372252009-02-12 08:38:30 +000012298 /* First, check if we need to linearize the skb (due to FW
12299 restrictions). No need to check fragmentation if page size > 8K
12300 (there will be no violation to FW restrictions) */
Eilon Greenstein755735e2008-06-23 20:35:13 -070012301 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12302 /* Statistics of linearization */
12303 bp->lin_cnt++;
12304 if (skb_linearize(skb) != 0) {
12305 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12306 "silently dropping this SKB\n");
12307 dev_kfree_skb_any(skb);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -070012308 return NETDEV_TX_OK;
Eilon Greenstein755735e2008-06-23 20:35:13 -070012309 }
12310 }
Eilon Greenstein632da4d2009-01-14 06:44:10 +000012311#endif
Eilon Greenstein755735e2008-06-23 20:35:13 -070012312
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012313 /*
Eilon Greenstein755735e2008-06-23 20:35:13 -070012314 Please read carefully. First we use one BD which we mark as start,
Eilon Greensteinca003922009-08-12 22:53:28 -070012315 then we have a parsing info BD (used for TSO or xsum),
Eilon Greenstein755735e2008-06-23 20:35:13 -070012316 and only then we have the rest of the TSO BDs.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012317 (don't forget to mark the last one as last,
12318 and to unmap only AFTER you write to the BD ...)
Eilon Greenstein755735e2008-06-23 20:35:13 -070012319 And above all, all pdb sizes are in words - NOT DWORDS!
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012320 */
12321
12322 pkt_prod = fp->tx_pkt_prod++;
Eilon Greenstein755735e2008-06-23 20:35:13 -070012323 bd_prod = TX_BD(fp->tx_bd_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012324
Eilon Greenstein755735e2008-06-23 20:35:13 -070012325 /* get a tx_buf and first BD */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012326 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
Eilon Greensteinca003922009-08-12 22:53:28 -070012327 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012328
Eilon Greensteinca003922009-08-12 22:53:28 -070012329 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Vladislav Zolotarovdea7aab2010-04-19 01:14:07 +000012330 tx_start_bd->general_data = (mac_type <<
12331 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
Eilon Greenstein3196a882008-08-13 15:58:49 -070012332 /* header nbd */
Eilon Greensteinca003922009-08-12 22:53:28 -070012333 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012334
Eilon Greenstein755735e2008-06-23 20:35:13 -070012335 /* remember the first BD of the packet */
12336 tx_buf->first_bd = fp->tx_bd_prod;
12337 tx_buf->skb = skb;
Eilon Greensteinca003922009-08-12 22:53:28 -070012338 tx_buf->flags = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012339
12340 DP(NETIF_MSG_TX_QUEUED,
12341 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Eilon Greensteinca003922009-08-12 22:53:28 -070012342 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012343
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080012344#ifdef BCM_VLAN
12345 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12346 (bp->flags & HW_VLAN_TX_FLAG)) {
Eilon Greensteinca003922009-08-12 22:53:28 -070012347 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12348 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
Eilon Greenstein755735e2008-06-23 20:35:13 -070012349 } else
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080012350#endif
Eilon Greensteinca003922009-08-12 22:53:28 -070012351 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
Eilon Greenstein755735e2008-06-23 20:35:13 -070012352
Eilon Greensteinca003922009-08-12 22:53:28 -070012353 /* turn on parsing and get a BD */
12354 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12355 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
Eilon Greenstein755735e2008-06-23 20:35:13 -070012356
Eilon Greensteinca003922009-08-12 22:53:28 -070012357 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
Eilon Greenstein755735e2008-06-23 20:35:13 -070012358
12359 if (xmit_type & XMIT_CSUM) {
Eilon Greensteinca003922009-08-12 22:53:28 -070012360 hlen = (skb_network_header(skb) - skb->data) / 2;
Eilon Greenstein755735e2008-06-23 20:35:13 -070012361
12362 /* for now NS flag is not used in Linux */
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000012363 pbd->global_data =
12364 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12365 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
Eilon Greenstein755735e2008-06-23 20:35:13 -070012366
12367 pbd->ip_hlen = (skb_transport_header(skb) -
12368 skb_network_header(skb)) / 2;
12369
12370 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12371
12372 pbd->total_hlen = cpu_to_le16(hlen);
Eilon Greensteinca003922009-08-12 22:53:28 -070012373 hlen = hlen*2;
Eilon Greenstein755735e2008-06-23 20:35:13 -070012374
Eilon Greensteinca003922009-08-12 22:53:28 -070012375 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
Eilon Greenstein755735e2008-06-23 20:35:13 -070012376
12377 if (xmit_type & XMIT_CSUM_V4)
Eilon Greensteinca003922009-08-12 22:53:28 -070012378 tx_start_bd->bd_flags.as_bitfield |=
Eilon Greenstein755735e2008-06-23 20:35:13 -070012379 ETH_TX_BD_FLAGS_IP_CSUM;
12380 else
Eilon Greensteinca003922009-08-12 22:53:28 -070012381 tx_start_bd->bd_flags.as_bitfield |=
12382 ETH_TX_BD_FLAGS_IPV6;
Eilon Greenstein755735e2008-06-23 20:35:13 -070012383
12384 if (xmit_type & XMIT_CSUM_TCP) {
12385 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12386
12387 } else {
12388 s8 fix = SKB_CS_OFF(skb); /* signed! */
12389
Eilon Greensteinca003922009-08-12 22:53:28 -070012390 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
Eilon Greenstein755735e2008-06-23 20:35:13 -070012391
12392 DP(NETIF_MSG_TX_QUEUED,
Eilon Greensteinca003922009-08-12 22:53:28 -070012393 "hlen %d fix %d csum before fix %x\n",
12394 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
Eilon Greenstein755735e2008-06-23 20:35:13 -070012395
12396 /* HW bug: fixup the CSUM */
12397 pbd->tcp_pseudo_csum =
12398 bnx2x_csum_fix(skb_transport_header(skb),
12399 SKB_CS(skb), fix);
12400
12401 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12402 pbd->tcp_pseudo_csum);
12403 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012404 }
12405
FUJITA Tomonori1a983142010-04-04 01:51:03 +000012406 mapping = dma_map_single(&bp->pdev->dev, skb->data,
12407 skb_headlen(skb), DMA_TO_DEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012408
Eilon Greensteinca003922009-08-12 22:53:28 -070012409 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12410 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12411 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12412 tx_start_bd->nbd = cpu_to_le16(nbd);
12413 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12414 pkt_size = tx_start_bd->nbytes;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012415
12416 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
Eilon Greenstein755735e2008-06-23 20:35:13 -070012417 " nbytes %d flags %x vlan %x\n",
Eilon Greensteinca003922009-08-12 22:53:28 -070012418 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12419 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12420 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012421
Eilon Greenstein755735e2008-06-23 20:35:13 -070012422 if (xmit_type & XMIT_GSO) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012423
12424 DP(NETIF_MSG_TX_QUEUED,
12425 "TSO packet len %d hlen %d total len %d tso size %d\n",
12426 skb->len, hlen, skb_headlen(skb),
12427 skb_shinfo(skb)->gso_size);
12428
Eilon Greensteinca003922009-08-12 22:53:28 -070012429 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012430
Eilon Greenstein755735e2008-06-23 20:35:13 -070012431 if (unlikely(skb_headlen(skb) > hlen))
Eilon Greensteinca003922009-08-12 22:53:28 -070012432 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12433 hlen, bd_prod, ++nbd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012434
12435 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12436 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
Eilon Greenstein755735e2008-06-23 20:35:13 -070012437 pbd->tcp_flags = pbd_tcp_flags(skb);
12438
12439 if (xmit_type & XMIT_GSO_V4) {
12440 pbd->ip_id = swab16(ip_hdr(skb)->id);
12441 pbd->tcp_pseudo_csum =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012442 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12443 ip_hdr(skb)->daddr,
12444 0, IPPROTO_TCP, 0));
Eilon Greenstein755735e2008-06-23 20:35:13 -070012445
12446 } else
12447 pbd->tcp_pseudo_csum =
12448 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12449 &ipv6_hdr(skb)->daddr,
12450 0, IPPROTO_TCP, 0));
12451
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012452 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12453 }
Eilon Greensteinca003922009-08-12 22:53:28 -070012454 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012455
Eilon Greenstein755735e2008-06-23 20:35:13 -070012456 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12457 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012458
Eilon Greenstein755735e2008-06-23 20:35:13 -070012459 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Eilon Greensteinca003922009-08-12 22:53:28 -070012460 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12461 if (total_pkt_bd == NULL)
12462 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012463
FUJITA Tomonori1a983142010-04-04 01:51:03 +000012464 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12465 frag->page_offset,
12466 frag->size, DMA_TO_DEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012467
Eilon Greensteinca003922009-08-12 22:53:28 -070012468 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12469 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12470 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12471 le16_add_cpu(&pkt_size, frag->size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012472
Eilon Greenstein755735e2008-06-23 20:35:13 -070012473 DP(NETIF_MSG_TX_QUEUED,
Eilon Greensteinca003922009-08-12 22:53:28 -070012474 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
12475 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12476 le16_to_cpu(tx_data_bd->nbytes));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012477 }
12478
Eilon Greensteinca003922009-08-12 22:53:28 -070012479 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012480
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012481 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12482
Eilon Greenstein755735e2008-06-23 20:35:13 -070012483 /* now send a tx doorbell, counting the next BD
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012484 * if the packet contains or ends with it
12485 */
12486 if (TX_BD_POFF(bd_prod) < nbd)
12487 nbd++;
12488
Eilon Greensteinca003922009-08-12 22:53:28 -070012489 if (total_pkt_bd != NULL)
12490 total_pkt_bd->total_pkt_bytes = pkt_size;
12491
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012492 if (pbd)
12493 DP(NETIF_MSG_TX_QUEUED,
12494 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
12495 " tcp_flags %x xsum %x seq %u hlen %u\n",
12496 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12497 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
Eilon Greenstein755735e2008-06-23 20:35:13 -070012498 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012499
Eilon Greenstein755735e2008-06-23 20:35:13 -070012500 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012501
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -080012502 /*
12503 * Make sure that the BD data is updated before updating the producer
12504 * since FW might read the BD right after the producer is updated.
12505 * This is only applicable for weak-ordered memory model archs such
12506 * as IA-64. The following barrier is also mandatory since FW will
12507 * assumes packets must have BDs.
12508 */
12509 wmb();
12510
Eilon Greensteinca003922009-08-12 22:53:28 -070012511 fp->tx_db.data.prod += nbd;
12512 barrier();
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000012513 DOORBELL(bp, fp->index, fp->tx_db.raw);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012514
12515 mmiowb();
12516
Eilon Greenstein755735e2008-06-23 20:35:13 -070012517 fp->tx_bd_prod += nbd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012518
12519 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
Eilon Greensteinca003922009-08-12 22:53:28 -070012520 netif_tx_stop_queue(txq);
Stanislaw Gruszka9baddeb2010-03-09 06:55:02 +000012521
12522 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
12523 * ordering of set_bit() in netif_tx_stop_queue() and read of
12524 * fp->bd_tx_cons */
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -080012525 smp_mb();
Stanislaw Gruszka9baddeb2010-03-09 06:55:02 +000012526
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000012527 fp->eth_q_stats.driver_xoff++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012528 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
Eilon Greenstein555f6c72009-02-12 08:36:11 +000012529 netif_tx_wake_queue(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012530 }
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000012531 fp->tx_pkt++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012532
12533 return NETDEV_TX_OK;
12534}
12535
/* ndo_open callback: bring the NIC up. Called with rtnl_lock held.
 *
 * Before loading firmware, checks whether a previous parity-error
 * "process kill" recovery has completed; if this function can act as
 * the recovery leader it attempts the global reset itself, otherwise
 * it backs off and asks the caller to retry later.
 *
 * Returns 0 on success, -EAGAIN while recovery is still pending, or
 * the error code from bnx2x_nic_load().
 */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* carrier stays off until link-up is reported by the PHY */
	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		/* do { } while (0) is used only as a breakable block */
		do {
			/* Reset MCP mail box sequence if there is on going
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared it may mean that. We don't
			 * check the attention state here because it may have
			 * already been cleared by a "common" reset but we
			 * shell proceed with "process kill" anyway.
			 *
			 * NOTE(review): becoming leader requires all three:
			 * no other function loaded, the reserved HW lock
			 * acquired, and bnx2x_leader_reset() succeeding.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			/* recovery not possible from here: drop back to a
			 * low power state and tell the caller to retry
			 */
			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If u still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
12581
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070012582/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012583static int bnx2x_close(struct net_device *dev)
12584{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012585 struct bnx2x *bp = netdev_priv(dev);
12586
12587 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070012588 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
Vladislav Zolotarovd3dbfee2010-04-19 01:14:49 +000012589 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012590
12591 return 0;
12592}
12593
/* called with netif_tx_lock from dev_mcast.c */
/* ndo_set_multicast_list callback: program the RX filtering mode.
 *
 * Maps the netdev flags onto one of the driver's RX modes:
 *   IFF_PROMISC            -> PROMISC
 *   IFF_ALLMULTI (or too many multicast entries on E1) -> ALLMULTI
 *   otherwise              -> NORMAL + explicit multicast filtering
 * For the explicit case, E1 chips get one CAM entry per multicast MAC
 * (posted via a SET_MAC ramrod) while E1H chips use a 256-bit hash
 * filter written directly to MC_HASH registers.
 */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* nothing to program while the NIC is not fully up */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct netdev_hw_addr *ha;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			/* fill one CAM entry per multicast address; MACs
			 * are stored 16 bits at a time, byte-swapped
			 */
			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&ha->addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&ha->addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&ha->addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			old = config->hdr.length;
			/* invalidate stale entries left from a previous,
			 * longer multicast list
			 */
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			/* per-port base offset into the CAM */
			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			/* make the table visible before posting the ramrod;
			 * set_mac_pending is polled by the completion path
			 */
			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   ha->addr);

				/* bit index = top byte of CRC32c of the MAC;
				 * split into register index / bit-in-register
				 */
				crc = crc32c_le(0, ha->addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
12714
12715/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012716static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12717{
12718 struct sockaddr *addr = p;
12719 struct bnx2x *bp = netdev_priv(dev);
12720
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012721 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012722 return -EINVAL;
12723
12724 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012725 if (netif_running(dev)) {
12726 if (CHIP_IS_E1(bp))
Michael Chane665bfd2009-10-10 13:46:54 +000012727 bnx2x_set_eth_mac_addr_e1(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012728 else
Michael Chane665bfd2009-10-10 13:46:54 +000012729 bnx2x_set_eth_mac_addr_e1h(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012730 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012731
12732 return 0;
12733}
12734
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070012735/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +000012736static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12737 int devad, u16 addr)
12738{
12739 struct bnx2x *bp = netdev_priv(netdev);
12740 u16 value;
12741 int rc;
12742 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12743
12744 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12745 prtad, devad, addr);
12746
12747 if (prtad != bp->mdio.prtad) {
12748 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
12749 prtad, bp->mdio.prtad);
12750 return -EINVAL;
12751 }
12752
12753 /* The HW expects different devad if CL22 is used */
12754 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12755
12756 bnx2x_acquire_phy_lock(bp);
12757 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12758 devad, addr, &value);
12759 bnx2x_release_phy_lock(bp);
12760 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12761
12762 if (!rc)
12763 rc = value;
12764 return rc;
12765}
12766
12767/* called with rtnl_lock */
12768static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12769 u16 addr, u16 value)
12770{
12771 struct bnx2x *bp = netdev_priv(netdev);
12772 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12773 int rc;
12774
12775 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12776 " value 0x%x\n", prtad, devad, addr, value);
12777
12778 if (prtad != bp->mdio.prtad) {
12779 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
12780 prtad, bp->mdio.prtad);
12781 return -EINVAL;
12782 }
12783
12784 /* The HW expects different devad if CL22 is used */
12785 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12786
12787 bnx2x_acquire_phy_lock(bp);
12788 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12789 devad, addr, value);
12790 bnx2x_release_phy_lock(bp);
12791 return rc;
12792}
12793
12794/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012795static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12796{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012797 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +000012798 struct mii_ioctl_data *mdio = if_mii(ifr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012799
Eilon Greenstein01cd4522009-08-12 08:23:08 +000012800 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12801 mdio->phy_id, mdio->reg_num, mdio->val_in);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012802
Eilon Greenstein01cd4522009-08-12 08:23:08 +000012803 if (!netif_running(dev))
12804 return -EAGAIN;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070012805
Eilon Greenstein01cd4522009-08-12 08:23:08 +000012806 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012807}
12808
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012809/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012810static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12811{
12812 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012813 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012814
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +000012815 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12816 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12817 return -EAGAIN;
12818 }
12819
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012820 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12821 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12822 return -EINVAL;
12823
12824 /* This does not race with packet allocation
Eliezer Tamirc14423f2008-02-28 11:49:42 -080012825 * because the actual alloc size is
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012826 * only updated as part of load
12827 */
12828 dev->mtu = new_mtu;
12829
12830 if (netif_running(dev)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012831 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12832 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012833 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012834
12835 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012836}
12837
12838static void bnx2x_tx_timeout(struct net_device *dev)
12839{
12840 struct bnx2x *bp = netdev_priv(dev);
12841
12842#ifdef BNX2X_STOP_ON_ERROR
12843 if (!bp->panic)
12844 bnx2x_panic();
12845#endif
12846 /* This allows the netif to be shutdown gracefully before resetting */
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +000012847 schedule_delayed_work(&bp->reset_task, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012848}
12849
12850#ifdef BCM_VLAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012851/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012852static void bnx2x_vlan_rx_register(struct net_device *dev,
12853 struct vlan_group *vlgrp)
12854{
12855 struct bnx2x *bp = netdev_priv(dev);
12856
12857 bp->vlgrp = vlgrp;
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080012858
12859 /* Set flags according to the required capabilities */
12860 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12861
12862 if (dev->features & NETIF_F_HW_VLAN_TX)
12863 bp->flags |= HW_VLAN_TX_FLAG;
12864
12865 if (dev->features & NETIF_F_HW_VLAN_RX)
12866 bp->flags |= HW_VLAN_RX_FLAG;
12867
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012868 if (netif_running(dev))
Eliezer Tamir49d66772008-02-28 11:53:13 -080012869 bnx2x_set_client_config(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012870}
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012871
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012872#endif
12873
Alexey Dobriyan257ddbd2010-01-27 10:17:41 +000012874#ifdef CONFIG_NET_POLL_CONTROLLER
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012875static void poll_bnx2x(struct net_device *dev)
12876{
12877 struct bnx2x *bp = netdev_priv(dev);
12878
12879 disable_irq(bp->pdev->irq);
12880 bnx2x_interrupt(bp->pdev->irq, dev);
12881 enable_irq(bp->pdev->irq);
12882}
12883#endif
12884
/* net_device_ops table wiring the bnx2x callbacks defined above into
 * the networking core; optional members are compiled in only when the
 * corresponding feature (VLAN offload, netpoll) is configured.
 */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
12902
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012903static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12904 struct net_device *dev)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012905{
12906 struct bnx2x *bp;
12907 int rc;
12908
12909 SET_NETDEV_DEV(dev, &pdev->dev);
12910 bp = netdev_priv(dev);
12911
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012912 bp->dev = dev;
12913 bp->pdev = pdev;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012914 bp->flags = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012915 bp->func = PCI_FUNC(pdev->devfn);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012916
12917 rc = pci_enable_device(pdev);
12918 if (rc) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000012919 dev_err(&bp->pdev->dev,
12920 "Cannot enable PCI device, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012921 goto err_out;
12922 }
12923
12924 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000012925 dev_err(&bp->pdev->dev,
12926 "Cannot find PCI device base address, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012927 rc = -ENODEV;
12928 goto err_out_disable;
12929 }
12930
12931 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000012932 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12933 " base address, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012934 rc = -ENODEV;
12935 goto err_out_disable;
12936 }
12937
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012938 if (atomic_read(&pdev->enable_cnt) == 1) {
12939 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12940 if (rc) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000012941 dev_err(&bp->pdev->dev,
12942 "Cannot obtain PCI resources, aborting\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012943 goto err_out_disable;
12944 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012945
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012946 pci_set_master(pdev);
12947 pci_save_state(pdev);
12948 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012949
12950 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12951 if (bp->pm_cap == 0) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000012952 dev_err(&bp->pdev->dev,
12953 "Cannot find power management capability, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012954 rc = -EIO;
12955 goto err_out_release;
12956 }
12957
12958 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12959 if (bp->pcie_cap == 0) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000012960 dev_err(&bp->pdev->dev,
12961 "Cannot find PCI Express capability, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012962 rc = -EIO;
12963 goto err_out_release;
12964 }
12965
FUJITA Tomonori1a983142010-04-04 01:51:03 +000012966 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012967 bp->flags |= USING_DAC_FLAG;
FUJITA Tomonori1a983142010-04-04 01:51:03 +000012968 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000012969 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12970 " failed, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012971 rc = -EIO;
12972 goto err_out_release;
12973 }
12974
FUJITA Tomonori1a983142010-04-04 01:51:03 +000012975 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000012976 dev_err(&bp->pdev->dev,
12977 "System does not support DMA, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012978 rc = -EIO;
12979 goto err_out_release;
12980 }
12981
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012982 dev->mem_start = pci_resource_start(pdev, 0);
12983 dev->base_addr = dev->mem_start;
12984 dev->mem_end = pci_resource_end(pdev, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012985
12986 dev->irq = pdev->irq;
12987
Arjan van de Ven275f1652008-10-20 21:42:39 -070012988 bp->regview = pci_ioremap_bar(pdev, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012989 if (!bp->regview) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000012990 dev_err(&bp->pdev->dev,
12991 "Cannot map register space, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012992 rc = -ENOMEM;
12993 goto err_out_release;
12994 }
12995
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012996 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12997 min_t(u64, BNX2X_DB_SIZE,
12998 pci_resource_len(pdev, 2)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012999 if (!bp->doorbells) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000013000 dev_err(&bp->pdev->dev,
13001 "Cannot map doorbell space, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013002 rc = -ENOMEM;
13003 goto err_out_unmap;
13004 }
13005
13006 bnx2x_set_power_state(bp, PCI_D0);
13007
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013008 /* clean indirect addresses */
13009 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
13010 PCICFG_VENDOR_ID_OFFSET);
13011 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
13012 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
13013 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
13014 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013015
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +000013016 /* Reset the load counter */
13017 bnx2x_clear_load_cnt(bp);
13018
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013019 dev->watchdog_timeo = TX_TIMEOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013020
Stephen Hemmingerc64213c2008-11-21 17:36:04 -080013021 dev->netdev_ops = &bnx2x_netdev_ops;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013022 dev->ethtool_ops = &bnx2x_ethtool_ops;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013023 dev->features |= NETIF_F_SG;
13024 dev->features |= NETIF_F_HW_CSUM;
13025 if (bp->flags & USING_DAC_FLAG)
13026 dev->features |= NETIF_F_HIGHDMA;
Eilon Greenstein5316bc02009-07-21 05:47:43 +000013027 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13028 dev->features |= NETIF_F_TSO6;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013029#ifdef BCM_VLAN
13030 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080013031 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
Eilon Greenstein5316bc02009-07-21 05:47:43 +000013032
13033 dev->vlan_features |= NETIF_F_SG;
13034 dev->vlan_features |= NETIF_F_HW_CSUM;
13035 if (bp->flags & USING_DAC_FLAG)
13036 dev->vlan_features |= NETIF_F_HIGHDMA;
13037 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13038 dev->vlan_features |= NETIF_F_TSO6;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013039#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013040
Eilon Greenstein01cd4522009-08-12 08:23:08 +000013041 /* get_port_hwinfo() will set prtad and mmds properly */
13042 bp->mdio.prtad = MDIO_PRTAD_NONE;
13043 bp->mdio.mmds = 0;
13044 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13045 bp->mdio.dev = dev;
13046 bp->mdio.mdio_read = bnx2x_mdio_read;
13047 bp->mdio.mdio_write = bnx2x_mdio_write;
13048
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013049 return 0;
13050
13051err_out_unmap:
13052 if (bp->regview) {
13053 iounmap(bp->regview);
13054 bp->regview = NULL;
13055 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013056 if (bp->doorbells) {
13057 iounmap(bp->doorbells);
13058 bp->doorbells = NULL;
13059 }
13060
13061err_out_release:
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013062 if (atomic_read(&pdev->enable_cnt) == 1)
13063 pci_release_regions(pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013064
13065err_out_disable:
13066 pci_disable_device(pdev);
13067 pci_set_drvdata(pdev, NULL);
13068
13069err_out:
13070 return rc;
13071}
13072
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000013073static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
13074 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -080013075{
13076 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
13077
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000013078 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
13079
13080 /* return value of 1=2.5GHz 2=5GHz */
13081 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -080013082}
13083
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013084static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
13085{
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000013086 const struct firmware *firmware = bp->firmware;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013087 struct bnx2x_fw_file_hdr *fw_hdr;
13088 struct bnx2x_fw_file_section *sections;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013089 u32 offset, len, num_ops;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000013090 u16 *ops_offsets;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013091 int i;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000013092 const u8 *fw_ver;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013093
13094 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
13095 return -EINVAL;
13096
13097 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13098 sections = (struct bnx2x_fw_file_section *)fw_hdr;
13099
13100 /* Make sure none of the offsets and sizes make us read beyond
13101 * the end of the firmware data */
13102 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13103 offset = be32_to_cpu(sections[i].offset);
13104 len = be32_to_cpu(sections[i].len);
13105 if (offset + len > firmware->size) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000013106 dev_err(&bp->pdev->dev,
13107 "Section %d length is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013108 return -EINVAL;
13109 }
13110 }
13111
13112 /* Likewise for the init_ops offsets */
13113 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13114 ops_offsets = (u16 *)(firmware->data + offset);
13115 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13116
13117 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13118 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000013119 dev_err(&bp->pdev->dev,
13120 "Section offset %d is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013121 return -EINVAL;
13122 }
13123 }
13124
13125 /* Check FW version */
13126 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13127 fw_ver = firmware->data + offset;
13128 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13129 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13130 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13131 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000013132 dev_err(&bp->pdev->dev,
13133 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013134 fw_ver[0], fw_ver[1], fw_ver[2],
13135 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13136 BCM_5710_FW_MINOR_VERSION,
13137 BCM_5710_FW_REVISION_VERSION,
13138 BCM_5710_FW_ENGINEERING_VERSION);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000013139 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013140 }
13141
13142 return 0;
13143}
13144
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000013145static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013146{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000013147 const __be32 *source = (const __be32 *)_source;
13148 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013149 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013150
13151 for (i = 0; i < n/4; i++)
13152 target[i] = be32_to_cpu(source[i]);
13153}
13154
13155/*
13156 Ops array is stored in the following format:
13157 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13158 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000013159static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013160{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000013161 const __be32 *source = (const __be32 *)_source;
13162 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013163 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013164
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000013165 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013166 tmp = be32_to_cpu(source[j]);
13167 target[i].op = (tmp >> 24) & 0xff;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000013168 target[i].offset = tmp & 0xffffff;
13169 target[i].raw_data = be32_to_cpu(source[j + 1]);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013170 }
13171}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000013172
13173static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013174{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000013175 const __be16 *source = (const __be16 *)_source;
13176 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013177 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013178
13179 for (i = 0; i < n/2; i++)
13180 target[i] = be16_to_cpu(source[i]);
13181}
13182
Joe Perches7995c642010-02-17 15:01:52 +000013183#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
13184do { \
13185 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13186 bp->arr = kmalloc(len, GFP_KERNEL); \
13187 if (!bp->arr) { \
13188 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
13189 goto lbl; \
13190 } \
13191 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13192 (u8 *)bp->arr, len); \
13193} while (0)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013194
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013195static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13196{
Ben Hutchings45229b42009-11-07 11:53:39 +000013197 const char *fw_file_name;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013198 struct bnx2x_fw_file_hdr *fw_hdr;
Ben Hutchings45229b42009-11-07 11:53:39 +000013199 int rc;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013200
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013201 if (CHIP_IS_E1(bp))
Ben Hutchings45229b42009-11-07 11:53:39 +000013202 fw_file_name = FW_FILE_NAME_E1;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000013203 else if (CHIP_IS_E1H(bp))
Ben Hutchings45229b42009-11-07 11:53:39 +000013204 fw_file_name = FW_FILE_NAME_E1H;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000013205 else {
13206 dev_err(dev, "Unsupported chip revision\n");
13207 return -EINVAL;
13208 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013209
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000013210 dev_info(dev, "Loading %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013211
13212 rc = request_firmware(&bp->firmware, fw_file_name, dev);
13213 if (rc) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000013214 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013215 goto request_firmware_exit;
13216 }
13217
13218 rc = bnx2x_check_firmware(bp);
13219 if (rc) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000013220 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013221 goto request_firmware_exit;
13222 }
13223
13224 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13225
13226 /* Initialize the pointers to the init arrays */
13227 /* Blob */
13228 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13229
13230 /* Opcodes */
13231 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13232
13233 /* Offsets */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000013234 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13235 be16_to_cpu_n);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013236
13237 /* STORMs firmware */
Eilon Greenstein573f2032009-08-12 08:24:14 +000013238 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13239 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13240 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13241 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13242 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13243 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13244 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13245 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13246 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13247 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13248 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13249 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13250 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13251 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13252 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13253 be32_to_cpu(fw_hdr->csem_pram_data.offset);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013254
13255 return 0;
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000013256
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013257init_offsets_alloc_err:
13258 kfree(bp->init_ops);
13259init_ops_alloc_err:
13260 kfree(bp->init_data);
13261request_firmware_exit:
13262 release_firmware(bp->firmware);
13263
13264 return rc;
13265}
13266
13267
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013268static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13269 const struct pci_device_id *ent)
13270{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013271 struct net_device *dev = NULL;
13272 struct bnx2x *bp;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000013273 int pcie_width, pcie_speed;
Eliezer Tamir25047952008-02-28 11:50:16 -080013274 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013275
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013276 /* dev zeroed in init_etherdev */
Eilon Greenstein555f6c72009-02-12 08:36:11 +000013277 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013278 if (!dev) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000013279 dev_err(&pdev->dev, "Cannot allocate net device\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013280 return -ENOMEM;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013281 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013282
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013283 bp = netdev_priv(dev);
Joe Perches7995c642010-02-17 15:01:52 +000013284 bp->msg_enable = debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013285
Eilon Greensteindf4770de2009-08-12 08:23:28 +000013286 pci_set_drvdata(pdev, dev);
13287
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013288 rc = bnx2x_init_dev(pdev, dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013289 if (rc < 0) {
13290 free_netdev(dev);
13291 return rc;
13292 }
13293
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013294 rc = bnx2x_init_bp(bp);
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000013295 if (rc)
13296 goto init_one_exit;
13297
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013298 /* Set init arrays */
13299 rc = bnx2x_init_firmware(bp, &pdev->dev);
13300 if (rc) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000013301 dev_err(&pdev->dev, "Error loading firmware\n");
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013302 goto init_one_exit;
13303 }
13304
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000013305 rc = register_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013306 if (rc) {
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000013307 dev_err(&pdev->dev, "Cannot register net device\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013308 goto init_one_exit;
13309 }
13310
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000013311 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +000013312 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
13313 " IRQ %d, ", board_info[ent->driver_data].name,
13314 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13315 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13316 dev->base_addr, bp->pdev->irq);
13317 pr_cont("node addr %pM\n", dev->dev_addr);
Eilon Greensteinc0162012009-03-02 08:01:05 +000013318
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013319 return 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013320
13321init_one_exit:
13322 if (bp->regview)
13323 iounmap(bp->regview);
13324
13325 if (bp->doorbells)
13326 iounmap(bp->doorbells);
13327
13328 free_netdev(dev);
13329
13330 if (atomic_read(&pdev->enable_cnt) == 1)
13331 pci_release_regions(pdev);
13332
13333 pci_disable_device(pdev);
13334 pci_set_drvdata(pdev, NULL);
13335
13336 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013337}
13338
/* PCI remove entry point: tear down everything bnx2x_init_one() set up,
 * in reverse order.  Must not be reordered: the netdev is unregistered
 * first so no new traffic or ndo callbacks can arrive, then pending work
 * is cancelled, then memory/mappings are released.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	/* free the firmware-derived init arrays and the firmware blob */
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* release regions only if no other function still holds them */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
13374
/* PM suspend callback: save PCI state and, if the interface is up,
 * detach it, unload the NIC and drop to the requested power state.
 * All netdev manipulation is done under rtnl_lock.
 */
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	/* interface down: state is saved, nothing more to do */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
13405
/* PM resume callback: restore PCI state and, if the interface was up,
 * re-power the device, reattach and reload the NIC.
 * Refuses to resume (-EAGAIN) while parity-error recovery is in progress.
 */
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	/* don't touch the chip mid-recovery; caller may retry later */
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	/* interface down: state restored, nothing more to do */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
13441
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070013442static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13443{
13444 int i;
13445
13446 bp->state = BNX2X_STATE_ERROR;
13447
13448 bp->rx_mode = BNX2X_RX_MODE_NONE;
13449
13450 bnx2x_netif_stop(bp, 0);
Stanislaw Gruszkac89af1a2010-05-17 17:35:38 -070013451 netif_carrier_off(bp->dev);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070013452
13453 del_timer_sync(&bp->timer);
13454 bp->stats_state = STATS_STATE_DISABLED;
13455 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
13456
13457 /* Release IRQs */
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +000013458 bnx2x_free_irq(bp, false);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070013459
13460 if (CHIP_IS_E1(bp)) {
13461 struct mac_configuration_cmd *config =
13462 bnx2x_sp(bp, mcast_config);
13463
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -080013464 for (i = 0; i < config->hdr.length; i++)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070013465 CAM_INVALIDATE(config->config_table[i]);
13466 }
13467
13468 /* Free SKBs, SGEs, TPA pool and driver internals */
13469 bnx2x_free_skbs(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000013470 for_each_queue(bp, i)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070013471 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000013472 for_each_queue(bp, i)
Eilon Greenstein7cde1c82009-01-22 06:01:25 +000013473 netif_napi_del(&bnx2x_fp(bp, i, napi));
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070013474 bnx2x_free_mem(bp);
13475
13476 bp->state = BNX2X_STATE_CLOSED;
13477
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070013478 return 0;
13479}
13480
/* Re-discover management-CPU (MCP) shared memory after a PCI error reset:
 * re-read the shmem base, validate the MCP signature and resync the
 * driver/firmware mailbox sequence number.  If shmem looks bogus the MCP
 * is declared dead via NO_MCP_FLAG and the driver runs without it.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* shmem must fall inside the expected window; otherwise no MCP.
	 * NOTE(review): the 0xA0000-0xC0000 window looks device-specific --
	 * confirm against the chip documentation before changing. */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		/* resync the mailbox sequence number with the firmware */
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
13510
Wendy Xiong493adb12008-06-23 20:36:22 -070013511/**
13512 * bnx2x_io_error_detected - called when PCI error is detected
13513 * @pdev: Pointer to PCI device
13514 * @state: The current pci connection state
13515 *
13516 * This function is called after a PCI bus error affecting
13517 * this device has been detected.
13518 */
13519static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13520 pci_channel_state_t state)
13521{
13522 struct net_device *dev = pci_get_drvdata(pdev);
13523 struct bnx2x *bp = netdev_priv(dev);
13524
13525 rtnl_lock();
13526
13527 netif_device_detach(dev);
13528
Dean Nelson07ce50e2009-07-31 09:13:25 +000013529 if (state == pci_channel_io_perm_failure) {
13530 rtnl_unlock();
13531 return PCI_ERS_RESULT_DISCONNECT;
13532 }
13533
Wendy Xiong493adb12008-06-23 20:36:22 -070013534 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070013535 bnx2x_eeh_nic_unload(bp);
Wendy Xiong493adb12008-06-23 20:36:22 -070013536
13537 pci_disable_device(pdev);
13538
13539 rtnl_unlock();
13540
13541 /* Request a slot reset */
13542 return PCI_ERS_RESULT_NEED_RESET;
13543}
13544
13545/**
13546 * bnx2x_io_slot_reset - called after the PCI bus has been reset
13547 * @pdev: Pointer to PCI device
13548 *
13549 * Restart the card from scratch, as if from a cold-boot.
13550 */
13551static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13552{
13553 struct net_device *dev = pci_get_drvdata(pdev);
13554 struct bnx2x *bp = netdev_priv(dev);
13555
13556 rtnl_lock();
13557
13558 if (pci_enable_device(pdev)) {
13559 dev_err(&pdev->dev,
13560 "Cannot re-enable PCI device after reset\n");
13561 rtnl_unlock();
13562 return PCI_ERS_RESULT_DISCONNECT;
13563 }
13564
13565 pci_set_master(pdev);
13566 pci_restore_state(pdev);
13567
13568 if (netif_running(dev))
13569 bnx2x_set_power_state(bp, PCI_D0);
13570
13571 rtnl_unlock();
13572
13573 return PCI_ERS_RESULT_RECOVERED;
13574}
13575
13576/**
13577 * bnx2x_io_resume - called when traffic can start flowing again
13578 * @pdev: Pointer to PCI device
13579 *
13580 * This callback is called when the error recovery driver tells us that
13581 * its OK to resume normal operation.
13582 */
13583static void bnx2x_io_resume(struct pci_dev *pdev)
13584{
13585 struct net_device *dev = pci_get_drvdata(pdev);
13586 struct bnx2x *bp = netdev_priv(dev);
13587
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +000013588 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13589 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13590 return;
13591 }
13592
Wendy Xiong493adb12008-06-23 20:36:22 -070013593 rtnl_lock();
13594
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070013595 bnx2x_eeh_recover(bp);
13596
Wendy Xiong493adb12008-06-23 20:36:22 -070013597 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070013598 bnx2x_nic_load(bp, LOAD_NORMAL);
Wendy Xiong493adb12008-06-23 20:36:22 -070013599
13600 netif_device_attach(dev);
13601
13602 rtnl_unlock();
13603}
13604
/* PCI error recovery (EEH/AER) callbacks */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
13610
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013611static struct pci_driver bnx2x_pci_driver = {
Wendy Xiong493adb12008-06-23 20:36:22 -070013612 .name = DRV_MODULE_NAME,
13613 .id_table = bnx2x_pci_tbl,
13614 .probe = bnx2x_init_one,
13615 .remove = __devexit_p(bnx2x_remove_one),
13616 .suspend = bnx2x_suspend,
13617 .resume = bnx2x_resume,
13618 .err_handler = &bnx2x_err_handler,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013619};
13620
13621static int __init bnx2x_init(void)
13622{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000013623 int ret;
13624
Joe Perches7995c642010-02-17 15:01:52 +000013625 pr_info("%s", version);
Eilon Greenstein938cf542009-08-12 08:23:37 +000013626
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080013627 bnx2x_wq = create_singlethread_workqueue("bnx2x");
13628 if (bnx2x_wq == NULL) {
Joe Perches7995c642010-02-17 15:01:52 +000013629 pr_err("Cannot create workqueue\n");
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080013630 return -ENOMEM;
13631 }
13632
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000013633 ret = pci_register_driver(&bnx2x_pci_driver);
13634 if (ret) {
Joe Perches7995c642010-02-17 15:01:52 +000013635 pr_err("Cannot register driver\n");
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000013636 destroy_workqueue(bnx2x_wq);
13637 }
13638 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013639}
13640
/* Module exit: unregister from the PCI core first so no new work can be
 * queued, then destroy the workqueue.
 */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
13647
/* module entry/exit points */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
13650
Michael Chan993ac7b2009-10-10 13:46:56 +000013651#ifdef BCM_CNIC
13652
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	/* spq_lock protects both the SPQ producer and the CNIC KWQ ring */
	spin_lock_bh(&bp->spq_lock);
	/* 'count' completions free up that many SPQ slots */
	bp->cnic_spq_pending -= count;

	/* drain queued CNIC KWQEs into the SPQ while there is room */
	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* advance the consumer, wrapping at the end of the ring */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	/* publish the new SPQ producer to the hardware */
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
13688
/* Queue up to 'count' CNIC 16-byte KWQEs onto the driver's staging ring;
 * they are later posted to the SPQ by bnx2x_cnic_sp_post().
 * Returns the number of elements actually accepted (may be < count if
 * the staging ring fills up), or -EIO on driver panic.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* staging ring full: accept what fits, report it via i */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		/* advance the producer, wrapping at the end of the ring */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* kick the drain if the SPQ has room right now */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
13731
13732static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13733{
13734 struct cnic_ops *c_ops;
13735 int rc = 0;
13736
13737 mutex_lock(&bp->cnic_mutex);
13738 c_ops = bp->cnic_ops;
13739 if (c_ops)
13740 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13741 mutex_unlock(&bp->cnic_mutex);
13742
13743 return rc;
13744}
13745
13746static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13747{
13748 struct cnic_ops *c_ops;
13749 int rc = 0;
13750
13751 rcu_read_lock();
13752 c_ops = rcu_dereference(bp->cnic_ops);
13753 if (c_ops)
13754 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13755 rcu_read_unlock();
13756
13757 return rc;
13758}
13759
13760/*
13761 * for commands that have no data
13762 */
13763static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13764{
13765 struct cnic_ctl_info ctl = {0};
13766
13767 ctl.cmd = cmd;
13768
13769 return bnx2x_cnic_ctl_send(bp, &ctl);
13770}
13771
13772static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
13773{
13774 struct cnic_ctl_info ctl;
13775
13776 /* first we tell CNIC and only then we count this as a completion */
13777 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13778 ctl.data.comp.cid = cid;
13779
13780 bnx2x_cnic_ctl_send_bh(bp, &ctl);
13781 bnx2x_cnic_sp_post(bp, 1);
13782}
13783
/* Control entry point exposed to the CNIC driver: dispatch on ctl->cmd.
 * Returns 0 on success or -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	/* write one context-table (ILT) entry on CNIC's behalf */
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	/* CNIC reports consumed SPQ completions; drain more KWQEs */
	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
13830
/* Populate the cnic_eth_dev IRQ/status-block description handed to CNIC:
 * irq_arr[0] is CNIC's own status block (MSI-X vector 1 when MSI-X is in
 * use), irq_arr[1] is the default status block.
 */
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
13850
/**
 * bnx2x_register_cnic - attach a CNIC driver instance to this device.
 * @dev:  net device CNIC is binding to
 * @ops:  CNIC callback table; must be non-NULL
 * @data: opaque CNIC context stored for later cnic_ctl() calls
 *
 * Allocates the kernel work-queue (kwq) ring used to forward CNIC
 * slow-path requests, initializes the CNIC status block and IRQ info,
 * and only then publishes @ops via rcu_assign_pointer() so readers
 * under rcu_read_lock() (see bnx2x_cnic_ctl_send()) never observe a
 * half-initialized state.
 *
 * Returns 0 on success, -EINVAL for NULL @ops, -EBUSY while interrupts
 * are disabled, -ENOMEM if the kwq page cannot be allocated.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* intr_sem != 0 means interrupts are currently disabled */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* empty ring: producer == consumer; last marks wrap-around point */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* publish last: RCU readers may use ops as soon as this lands */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
13888
/**
 * bnx2x_unregister_cnic - detach the CNIC driver from this device.
 * @dev: net device CNIC is unbinding from
 *
 * Reverses bnx2x_register_cnic(): removes the iSCSI MAC if it was set,
 * clears the published ops pointer under cnic_mutex, then waits for all
 * in-flight RCU readers (bnx2x_cnic_ctl_send()) with synchronize_rcu()
 * before freeing the kwq ring they might still reference.
 *
 * Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* wait for RCU readers to drain before releasing the kwq ring */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
13908
13909struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
13910{
13911 struct bnx2x *bp = netdev_priv(dev);
13912 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13913
13914 cp->drv_owner = THIS_MODULE;
13915 cp->chip_id = CHIP_ID(bp);
13916 cp->pdev = bp->pdev;
13917 cp->io_base = bp->regview;
13918 cp->io_base2 = bp->doorbells;
13919 cp->max_kwqe_pending = 8;
13920 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
13921 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
13922 cp->ctx_tbl_len = CNIC_ILT_LINES;
13923 cp->starting_cid = BCM_CNIC_CID_START;
13924 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
13925 cp->drv_ctl = bnx2x_drv_ctl;
13926 cp->drv_register_cnic = bnx2x_register_cnic;
13927 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
13928
13929 return cp;
13930}
13931EXPORT_SYMBOL(bnx2x_cnic_probe);
13932
13933#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013934