blob: 3f8402750c7a7460ee1bc66c5f73263a92e61676 [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Eilon Greensteind05c26c2009-01-17 23:26:13 -08003 * Copyright (c) 2007-2009 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080041#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070045#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020046#include <linux/workqueue.h>
47#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070048#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020049#include <linux/prefetch.h>
50#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020051#include <linux/io.h>
52
Eilon Greenstein359d8b12009-02-12 08:38:25 +000053
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020054#include "bnx2x.h"
55#include "bnx2x_init.h"
56
/* Driver version/date strings reported via ethtool and module info */
#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
/* bootcode version constant — presumably the minimum supported; verify
 * against the version check in the init path */
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020063
/* Banner printed once at module load time */
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* multi-queue mode selector (default on) */
static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;		/* disable TPA (LRO) aggregation */
static int poll;		/* use polling instead of interrupts (debug) */
static int debug;		/* default debug msglevel */
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

/* force interrupt mode instead of auto-selecting MSI-X/MSI/INTx */
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);

/* force PCIe Max Read Request Size; -1 means leave the HW default */
static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

/* dedicated workqueue for the slowpath task (see bnx2x_int_disable_sync) */
static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020099
/* Supported board types; values are indices into board_info[] below */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


/* PCI IDs this driver binds to; driver_data carries the board_type */
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
127
128/****************************************************************************
129* General service functions
130****************************************************************************/
131
/* used only at init
 * locking is done by mcp
 *
 * Indirect register write: programs the GRC address window in PCI
 * config space, writes the data through it, then restores the window
 * to the vendor-ID offset so a stray config read is harmless.
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	/* park the window back on a benign offset */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
142
/* Indirect register read counterpart of bnx2x_reg_wr_ind(): selects the
 * GRC address via the PCI config window, reads the data register, and
 * restores the window to the vendor-ID offset before returning.
 */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	/* park the window back on a benign offset */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200154
/* Per-channel DMAE "go" registers, indexed by DMAE command slot;
 * bnx2x_post_dmae() writes 1 to one of these to kick the engine.
 */
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
161
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	/* command slot idx lives at a fixed stride inside DMAE command memory */
	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	/* copy the command one dword at a time */
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	/* kick the engine for this command slot */
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
178
/* Copy len32 dwords from host memory (dma_addr) to device address
 * dst_addr using the DMAE engine.  Before the DMAE is ready (early
 * init) it falls back to indirect register writes.  Serialized by
 * bp->dmae_mutex; completion is detected by polling the slowpath
 * wb_comp word for DMAE_COMP_VAL (bounded by ~200 iterations).
 */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	/* PCI -> GRC transfer; completion written back to host memory */
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;	/* GRC address is in dwords */
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* clear the completion word before posting the command */
	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* poll for the DMAE completion value written back by the engine */
	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
252
/* Read len32 dwords from device address src_addr into the slowpath
 * wb_data buffer using the DMAE engine.  Before the DMAE is ready it
 * falls back to indirect register reads.  Serialized by
 * bp->dmae_mutex; completion is detected by polling the slowpath
 * wb_comp word for DMAE_COMP_VAL (bounded by ~200 iterations).
 */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	/* clear the destination buffer before the transfer */
	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	/* GRC -> PCI transfer; completion written back to host memory */
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;	/* GRC address is in dwords */
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	/* clear the completion word before posting the command */
	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* poll for the DMAE completion value written back by the engine */
	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200327
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700328/* used only for slowpath so not inlined */
329static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
330{
331 u32 wb_write[2];
332
333 wb_write[0] = val_hi;
334 wb_write[1] = val_lo;
335 REG_WR_DMAE(bp, reg, wb_write, 2);
336}
337
#ifdef USE_WB_RD
/* Read a write-back 64-bit value via DMAE (compiled out unless
 * USE_WB_RD is defined).
 */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	/* wb_data[0] is the high dword, wb_data[1] the low dword */
	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
348
/* Scan the firmware assert lists of the four storm processors
 * (XSTORM, TSTORM, CSTORM, USTORM) and print every recorded assert.
 * Each storm's scan stops at the first invalid-opcode entry.
 * Returns the total number of asserts found (0 = none).
 */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		/* each assert entry is four consecutive dwords */
		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800469
/* Dump the firmware trace buffer from MCP scratch memory to the kernel
 * log.  The buffer is circular: the mark read from 0xf104 points at the
 * split, so the dump prints [mark..0xF900] first and then wraps around
 * to [0xF108..mark].  Output is emitted as raw NUL-terminated text
 * chunks via KERN_CONT continuation lines.
 */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	/* round up to a dword boundary */
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		/* NUL-terminate so the chunk can be printed as a string */
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
496
/* Crash-time diagnostic dump: disables statistics, then prints the
 * driver/HW indices for every Rx and Tx queue, dumps a window of each
 * ring (BDs, SGEs, CQEs, Tx BDs) around the current consumer, and
 * finally dumps the firmware trace and storm assert lists.
 */
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	/* stop statistics before dumping to keep the state stable */
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("queue[%d]: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* dump a window around the HW consumer: 10 back, 503 ahead */
		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
609
/* Enable interrupts in the HC (host coalescing) block for this port,
 * selecting the mode bits according to the flag-derived interrupt mode
 * (MSI-X, MSI, or INTx).  On E1H also programs the leading/trailing
 * attention edge registers.
 */
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		/* INTx: first enable everything (including MSI/MSI-X bits),
		 * write that out, then clear the MSI/MSI-X enable for the
		 * final write below */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
661
/* Disable all interrupt generation in the HC block for this port and
 * verify the write landed by reading the register back.
 */
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* clear every interrupt-enable bit */
	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
683
/* Disable interrupt handling at the driver level and wait until every
 * in-flight ISR and the slowpath work item have completed.  When
 * disable_hw is set the HC is also programmed to stop generating
 * interrupts (bnx2x_int_disable).
 */
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is the slowpath; fastpath vectors follow it */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
708
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700709/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200710
711/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700712 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200713 */
714
/* Acknowledge a status block to the IGU: writes the new index for the
 * given storm along with the ack flags (op, update) in a single 32-bit
 * write to the per-port HC command register.
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	/* the whole struct is pushed to the HW as one dword */
	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
733
/* Refresh the driver's cached fastpath status-block indices from the
 * chip-written status block.
 *
 * Returns a bitmask of what changed: bit 0 set when the c_status_block
 * index moved, bit 1 set when the u_status_block index moved; 0 means
 * the chip reported no new work.
 */
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
750
/* Read this port's HC SIMD mask register and return its value
 * (truncated to 16 bits by the return type).
 * NOTE(review): reading this register appears to double as the
 * interrupt acknowledge - confirm against the HC hardware spec.
 */
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	/* per-port command register: port stride is 32 bytes */
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
762
763
764/*
765 * fast path service functions
766 */
767
Eilon Greenstein237907c2009-01-14 06:42:44 +0000768static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
769{
770 u16 tx_cons_sb;
771
772 /* Tell compiler that status block fields can change */
773 barrier();
774 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -0800775 return (fp->tx_pkt_cons != tx_cons_sb);
776}
777
778static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
779{
780 /* Tell compiler that consumer and producer can change */
781 barrier();
782 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
783
Eilon Greenstein237907c2009-01-14 06:42:44 +0000784}
785
/* Free the skb held in the Tx packet ring at position idx and unmap all
 * buffer descriptors (BDs) that belonged to it: the first (linear) BD,
 * an optional parse BD and TSO split-header BD, and one BD per fragment.
 *
 * Returns the BD-ring consumer index of the last BD freed (the new
 * tx_bd_cons for the caller).
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	/* nbd counts the BDs of this packet; -1 excludes the one just done */
	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		/* checksum/LSO packets carry a parse BD - skip it */
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
853
/* Return the number of Tx BDs still available on this fastpath ring.
 * The "next-page" entries (NUM_TX_RINGS of them) are counted as used so
 * the result is conservative; SUB_S16 handles 16-bit index wrap-around.
 */
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
876
/* Tx completion processing: free up to 'work' completed packets
 * (as indicated by the status-block consumer) and, if the netdev Tx
 * queue was stopped for lack of BDs, wake it once enough space exists.
 */
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	/* one netdev Tx queue per fastpath */
	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* re-check under the Tx lock to close the race with
		 * start_xmit() stopping the queue concurrently */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}
939
Eilon Greenstein3196a882008-08-13 15:58:49 -0700940
/* Handle a slowpath (ramrod) completion CQE received on a fastpath ring:
 * advance the matching fastpath state (non-leading queues, fp->index != 0)
 * or the global bp->state machine (leading queue), and release the
 * slowpath-queue credit taken when the ramrod was posted.
 */
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* one more slot free in the slowpath queue */
	bp->spq_left++;

	/* non-leading (MULTI) queues only track their own fp->state */
	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
1014
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001015static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1016 struct bnx2x_fastpath *fp, u16 index)
1017{
1018 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1019 struct page *page = sw_buf->page;
1020 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1021
1022 /* Skip "next page" elements */
1023 if (!page)
1024 return;
1025
1026 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001027 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001028 __free_pages(page, PAGES_PER_SGE_SHIFT);
1029
1030 sw_buf->page = NULL;
1031 sge->addr_hi = 0;
1032 sge->addr_lo = 0;
1033}
1034
/* Release all Rx SGE ring entries in the range [0, last). */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int idx;

	for (idx = 0; idx < last; idx++)
		bnx2x_free_rx_sge(bp, fp, idx);
}
1043
/* Allocate and DMA-map a fresh page (or compound page, PAGES_PER_SGE)
 * for Rx SGE ring entry 'index', recording the mapping in the software
 * ring and publishing the bus address in the hardware SGE ring.
 *
 * Returns 0 on success, -ENOMEM on allocation or mapping failure
 * (the page is freed on mapping failure; the ring entry is untouched).
 */
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	/* GFP_ATOMIC: may be called from the Rx softirq path */
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	/* hand the bus address to the chip (little-endian, split 64-bit) */
	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
1070
/* Allocate and DMA-map a fresh Rx skb for BD ring entry 'index',
 * recording the skb and mapping in the software ring and publishing
 * the bus address in the hardware Rx BD ring.
 *
 * Returns 0 on success, -ENOMEM on allocation or mapping failure
 * (the skb is freed on mapping failure; the ring entry is untouched).
 */
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* hand the bus address to the chip (little-endian, split 64-bit) */
	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
1098
/* Recycle an Rx buffer from the consumer slot to the producer slot.
 *
 * Note that we are not allocating a new skb - we are just moving one
 * from cons to prod - and not creating a new mapping, so there is no
 * need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	/* give the CPU-touched head of the buffer back to the device;
	 * only RX_COPY_THRESH bytes may have been read by the CPU */
	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	/* move skb, mapping and hardware BD from cons to prod */
	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
1122
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001123static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1124 u16 idx)
1125{
1126 u16 last_max = fp->last_max_sge;
1127
1128 if (SUB_S16(idx, last_max) > 0)
1129 fp->last_max_sge = idx;
1130}
1131
1132static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1133{
1134 int i, j;
1135
1136 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1137 int idx = RX_SGE_CNT * i - 1;
1138
1139 for (j = 0; j < 2; j++) {
1140 SGE_MASK_CLEAR_BIT(fp, idx);
1141 idx--;
1142 }
1143 }
1144}
1145
/* Account the SGE pages consumed by a TPA aggregation (described by the
 * fast-path CQE) and advance the SGE producer over every mask element
 * that is now fully consumed, so the chip may reuse those entries.
 */
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	/* number of SGE entries used beyond the data on the first BD */
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		/* stop at the first element with entries still in flight */
		if (likely(fp->sge_mask[i]))
			break;

		/* element fully consumed - refill mask, advance producer */
		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
1198
1199static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1200{
1201 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1202 memset(fp->sge_mask, 0xff,
1203 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1204
Eilon Greenstein33471622008-08-13 15:59:08 -07001205 /* Clear the two last indices in the page to 1:
1206 these are the indices that correspond to the "next" element,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001207 hence will never be indicated and should be removed from
1208 the calculations. */
1209 bnx2x_clear_sge_mask_next_elems(fp);
1210}
1211
/* Begin a TPA (LRO) aggregation on bin 'queue': park the partially
 * received skb (at cons) in the TPA pool and put the pool's spare empty
 * skb onto the producer BD so the ring stays full while the aggregation
 * is in flight.
 */
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	/* debug bookkeeping: which TPA bins are currently active */
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
1250
/* Attach the SGE pages of a completed TPA aggregation to 'skb' as page
 * fragments, replacing each consumed SGE ring page with a freshly
 * allocated one.
 *
 * Returns 0 on success; on a replacement-page allocation failure,
 * returns the error and the caller drops the packet.
 */
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	/* bytes carried in SGE pages = total packet - data on first BD */
	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					   max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
1316
/* Complete a TPA (LRO) aggregation on bin 'queue': take the aggregated
 * skb out of the pool, fix up its IP checksum, attach the SGE fragments
 * and hand it to the stack; a freshly allocated skb replaces it in the
 * pool.  On allocation failure the packet is dropped and the old buffer
 * stays in the bin.  Either way the bin returns to BNX2X_TPA_STOP.
 */
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		/* hardware validated the aggregated TCP checksum */
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			/* recompute the IP header checksum: aggregation
			   changed tot_len relative to the original frames */
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
1406
/* Publish the new Rx BD, CQE and SGE producer values to the firmware
 * via the USTORM internal memory, with the write ordering the firmware
 * requires.
 */
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	/* copy the producers struct word-by-word into USTORM memory */
	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
1441
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001442static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1443{
1444 struct bnx2x *bp = fp->bp;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001445 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001446 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1447 int rx_pkt = 0;
1448
1449#ifdef BNX2X_STOP_ON_ERROR
1450 if (unlikely(bp->panic))
1451 return 0;
1452#endif
1453
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001454 /* CQ "next element" is of the size of the regular element,
1455 that's why it's ok here */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001456 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1457 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1458 hw_comp_cons++;
1459
1460 bd_cons = fp->rx_bd_cons;
1461 bd_prod = fp->rx_bd_prod;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001462 bd_prod_fw = bd_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001463 sw_comp_cons = fp->rx_comp_cons;
1464 sw_comp_prod = fp->rx_comp_prod;
1465
1466 /* Memory barrier necessary as speculative reads of the rx
1467 * buffer can be ahead of the index in the status block
1468 */
1469 rmb();
1470
1471 DP(NETIF_MSG_RX_STATUS,
1472 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
Eilon Greenstein0626b892009-02-12 08:38:14 +00001473 fp->index, hw_comp_cons, sw_comp_cons);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001474
1475 while (sw_comp_cons != hw_comp_cons) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001476 struct sw_rx_bd *rx_buf = NULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001477 struct sk_buff *skb;
1478 union eth_rx_cqe *cqe;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001479 u8 cqe_fp_flags;
1480 u16 len, pad;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001481
1482 comp_ring_cons = RCQ_BD(sw_comp_cons);
1483 bd_prod = RX_BD(bd_prod);
1484 bd_cons = RX_BD(bd_cons);
1485
1486 cqe = &fp->rx_comp_ring[comp_ring_cons];
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001487 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001488
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001489 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001490 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1491 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
Eilon Greenstein68d59482009-01-14 21:27:36 -08001492 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001493 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1494 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001495
1496 /* is this a slowpath msg? */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001497 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001498 bnx2x_sp_event(fp, cqe);
1499 goto next_cqe;
1500
1501 /* this is an rx packet */
1502 } else {
1503 rx_buf = &fp->rx_buf_ring[bd_cons];
1504 skb = rx_buf->skb;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001505 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1506 pad = cqe->fast_path_cqe.placement_offset;
1507
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001508 /* If CQE is marked both TPA_START and TPA_END
1509 it is a non-TPA CQE */
1510 if ((!fp->disable_tpa) &&
1511 (TPA_TYPE(cqe_fp_flags) !=
1512 (TPA_TYPE_START | TPA_TYPE_END))) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07001513 u16 queue = cqe->fast_path_cqe.queue_index;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001514
1515 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1516 DP(NETIF_MSG_RX_STATUS,
1517 "calling tpa_start on queue %d\n",
1518 queue);
1519
1520 bnx2x_tpa_start(fp, queue, skb,
1521 bd_cons, bd_prod);
1522 goto next_rx;
1523 }
1524
1525 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1526 DP(NETIF_MSG_RX_STATUS,
1527 "calling tpa_stop on queue %d\n",
1528 queue);
1529
1530 if (!BNX2X_RX_SUM_FIX(cqe))
1531 BNX2X_ERR("STOP on none TCP "
1532 "data\n");
1533
1534 /* This is a size of the linear data
1535 on this skb */
1536 len = le16_to_cpu(cqe->fast_path_cqe.
1537 len_on_bd);
1538 bnx2x_tpa_stop(bp, fp, queue, pad,
1539 len, cqe, comp_ring_cons);
1540#ifdef BNX2X_STOP_ON_ERROR
1541 if (bp->panic)
1542 return -EINVAL;
1543#endif
1544
1545 bnx2x_update_sge_prod(fp,
1546 &cqe->fast_path_cqe);
1547 goto next_cqe;
1548 }
1549 }
1550
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001551 pci_dma_sync_single_for_device(bp->pdev,
1552 pci_unmap_addr(rx_buf, mapping),
1553 pad + RX_COPY_THRESH,
1554 PCI_DMA_FROMDEVICE);
1555 prefetch(skb);
1556 prefetch(((char *)(skb)) + 128);
1557
1558 /* is this an error packet? */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001559 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001560 DP(NETIF_MSG_RX_ERR,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001561 "ERROR flags %x rx packet %u\n",
1562 cqe_fp_flags, sw_comp_cons);
Eilon Greensteinde832a52009-02-12 08:36:33 +00001563 fp->eth_q_stats.rx_err_discard_pkt++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001564 goto reuse_rx;
1565 }
1566
1567 /* Since we don't have a jumbo ring
1568 * copy small packets if mtu > 1500
1569 */
1570 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1571 (len <= RX_COPY_THRESH)) {
1572 struct sk_buff *new_skb;
1573
1574 new_skb = netdev_alloc_skb(bp->dev,
1575 len + pad);
1576 if (new_skb == NULL) {
1577 DP(NETIF_MSG_RX_ERR,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001578 "ERROR packet dropped "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001579 "because of alloc failure\n");
Eilon Greensteinde832a52009-02-12 08:36:33 +00001580 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001581 goto reuse_rx;
1582 }
1583
1584 /* aligned copy */
1585 skb_copy_from_linear_data_offset(skb, pad,
1586 new_skb->data + pad, len);
1587 skb_reserve(new_skb, pad);
1588 skb_put(new_skb, len);
1589
1590 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1591
1592 skb = new_skb;
1593
1594 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1595 pci_unmap_single(bp->pdev,
1596 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07001597 bp->rx_buf_size,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001598 PCI_DMA_FROMDEVICE);
1599 skb_reserve(skb, pad);
1600 skb_put(skb, len);
1601
1602 } else {
1603 DP(NETIF_MSG_RX_ERR,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001604 "ERROR packet dropped because "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001605 "of alloc failure\n");
Eilon Greensteinde832a52009-02-12 08:36:33 +00001606 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001607reuse_rx:
1608 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1609 goto next_rx;
1610 }
1611
1612 skb->protocol = eth_type_trans(skb, bp->dev);
1613
1614 skb->ip_summed = CHECKSUM_NONE;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07001615 if (bp->rx_csum) {
Eilon Greenstein1adcd8b2008-08-13 15:48:29 -07001616 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1617 skb->ip_summed = CHECKSUM_UNNECESSARY;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07001618 else
Eilon Greensteinde832a52009-02-12 08:36:33 +00001619 fp->eth_q_stats.hw_csum_err++;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07001620 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001621 }
1622
Eilon Greenstein748e5432009-02-12 08:36:37 +00001623 skb_record_rx_queue(skb, fp->index);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001624#ifdef BCM_VLAN
Eilon Greenstein0c6671b2009-01-14 21:26:51 -08001625 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001626 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1627 PARSING_FLAGS_VLAN))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001628 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1629 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1630 else
1631#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001632 netif_receive_skb(skb);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001633
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001634
1635next_rx:
1636 rx_buf->skb = NULL;
1637
1638 bd_cons = NEXT_RX_IDX(bd_cons);
1639 bd_prod = NEXT_RX_IDX(bd_prod);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001640 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1641 rx_pkt++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001642next_cqe:
1643 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1644 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001645
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001646 if (rx_pkt == budget)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001647 break;
1648 } /* while */
1649
1650 fp->rx_bd_cons = bd_cons;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001651 fp->rx_bd_prod = bd_prod_fw;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001652 fp->rx_comp_cons = sw_comp_cons;
1653 fp->rx_comp_prod = sw_comp_prod;
1654
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001655 /* Update producers */
1656 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1657 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001658
1659 fp->rx_pkt += rx_pkt;
1660 fp->rx_calls++;
1661
1662 return rx_pkt;
1663}
1664
/* MSI-X fast-path interrupt handler: one vector per RX/TX queue pair.
 * Acknowledges the queue's status block with further IGU interrupts
 * disabled and defers all actual work to the queue's NAPI poll routine
 * (which re-enables the interrupt when done).
 */
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	/* ack the SB and keep it masked until NAPI poll completes */
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* warm the cache lines the poll routine will touch first */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
1695
/* Legacy INTx/MSI interrupt handler (single vector, possibly shared).
 * Reads the aggregated interrupt status, peels off the fast-path queue 0
 * bit (handled via NAPI) and the slow-path bit (handled via the sp_task
 * workqueue), and reports any leftover bits as unknown.
 */
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* status bit for fast-path queue 0 (bit 1 shifted by its SB id) */
	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	/* bit 0 is the slow-path (default) status block */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
1749
1750/* end of fast path */
1751
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001752static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001753
1754/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001755
1756/*
1757 * General service functions
1758 */
1759
/* Acquire a hardware resource lock shared between PCI functions.
 *
 * Each function owns a DRIVER_CONTROL register pair; writing the
 * resource bit to the +4 (set) register attempts the grab and reading
 * back the base register tells whether it succeeded.  Retries every
 * 5ms for up to 5 seconds.
 *
 * Returns 0 on success, -EINVAL for an out-of-range resource,
 * -EEXIST if this function already holds the lock, -EAGAIN on timeout.
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* functions 0-5 and 6-7 use different register banks */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
1804
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001805static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001806{
1807 u32 lock_status;
1808 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001809 int func = BP_FUNC(bp);
1810 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001811
1812 /* Validating that the resource is within range */
1813 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1814 DP(NETIF_MSG_HW,
1815 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1816 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1817 return -EINVAL;
1818 }
1819
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001820 if (func <= 5) {
1821 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1822 } else {
1823 hw_lock_control_reg =
1824 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1825 }
1826
Eliezer Tamirf1410642008-02-28 11:51:50 -08001827 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001828 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001829 if (!(lock_status & resource_bit)) {
1830 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1831 lock_status, resource_bit);
1832 return -EFAULT;
1833 }
1834
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001835 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001836 return 0;
1837}
1838
/* HW Lock for shared dual port PHYs:
 * serialize PHY access via the per-port mutex, and additionally take
 * the MDIO hardware lock when the board requires it.
 */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}
1847
/* Counterpart of bnx2x_acquire_phy_lock(): drop the MDIO hardware lock
 * (when one was taken) and then release the per-port PHY mutex.
 */
static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
1855
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001856int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1857{
1858 /* The GPIO should be swapped if swap register is set and active */
1859 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1860 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1861 int gpio_shift = gpio_num +
1862 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1863 u32 gpio_mask = (1 << gpio_shift);
1864 u32 gpio_reg;
1865 int value;
1866
1867 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1868 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1869 return -EINVAL;
1870 }
1871
1872 /* read GPIO value */
1873 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1874
1875 /* get the requested pin value */
1876 if ((gpio_reg & gpio_mask) == gpio_mask)
1877 value = 1;
1878 else
1879 value = 0;
1880
1881 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1882
1883 return value;
1884}
1885
/* Drive a GPIO pin: output low, output high or high-impedance input.
 *
 * Performs a read-modify-write of MISC_REG_GPIO under the GPIO HW lock.
 * Returns 0 on success or -EINVAL for a bad pin number; an unknown mode
 * leaves the register unchanged (apart from clearing non-float bits).
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1938
/* Configure a GPIO pin's interrupt output (set or clear).
 *
 * Mirrors bnx2x_set_gpio() but operates on MISC_REG_GPIO_INT, again as
 * a read-modify-write under the GPIO HW lock.
 * Returns 0 on success or -EINVAL for a bad pin number.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1984
/* Drive an SPIO pin: output low, output high or high-impedance input.
 *
 * Only SPIO 4..7 are settable; performs a read-modify-write of
 * MISC_REG_SPIO under the SPIO HW lock.
 * Returns 0 on success or -EINVAL for a bad pin number.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
2030
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002031static void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002032{
Eilon Greensteinad33ea32009-01-14 21:24:57 -08002033 switch (bp->link_vars.ieee_fc &
2034 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002035 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002036 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002037 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002038 break;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002039 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002040 bp->port.advertising |= (ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002041 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002042 break;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002043 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002044 bp->port.advertising |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08002045 break;
Eliezer Tamirf1410642008-02-28 11:51:50 -08002046 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002047 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002048 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002049 break;
2050 }
2051}
2052
/* Log the current link state to the kernel log and toggle the carrier.
 * Uses printk continuations, so the message fragments below must stay
 * in this exact order.
 */
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		/* report negotiated flow control, if any */
		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
2084
/* First-time PHY/link bring-up, called on device load.
 *
 * Selects the requested flow-control auto-advertisement based on MF
 * mode and MTU, initializes the PHY under the PHY lock (in loopback
 * for diagnostic loads) and recomputes the pause advertising flags.
 *
 * Returns the bnx2x_phy_init() status, or -EINVAL when the bootcode
 * (MCP) is absent.
 */
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		/* diagnostic loads bring the link up in loopback */
		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		/* on emulation/FPGA the link may already be up here */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing -not initializing link\n");
	return -EINVAL;
}
2121
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002122static void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002123{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002124 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002125 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002126 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002127 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002128
Eilon Greenstein19680c42008-08-13 15:47:33 -07002129 bnx2x_calc_fc_adv(bp);
2130 } else
2131 BNX2X_ERR("Bootcode is missing -not setting link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002132}
2133
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002134static void bnx2x__link_reset(struct bnx2x *bp)
2135{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002136 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002137 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00002138 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002139 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002140 } else
2141 BNX2X_ERR("Bootcode is missing -not resetting link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002142}
2143
2144static u8 bnx2x_link_test(struct bnx2x *bp)
2145{
2146 u8 rc;
2147
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002148 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002149 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002150 bnx2x_release_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002151
2152 return rc;
2153}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002154
/* Initialize the per-port rate-shaping and fairness parameters in
 * bp->cmng from the current line speed.  All timeouts are expressed in
 * SDM ticks of 4 usec.
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	/* line rate in bytes per usec */
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
2189
/* Program the per-VN (virtual NIC / PCI function) min/max bandwidth
 * parameters into the XSTORM internal memory, based on the function's
 * multi-function configuration read from shared memory.
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		/* config fields hold the rate in units of 100 Mbps */
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory (word by word) */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
2254
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002255
/* This function is called upon link interrupt (NIG attention):
 * re-sync the statistics state machine, let the link layer process the
 * PHY/MAC change, report the new state, and - on E1H multi-function
 * devices - notify the other functions on the port and re-program the
 * rate shaping / fairness contexts.
 */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control: tell the USTORM whether TX
		 * pause is active for this port */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			/* absolute function number: vn*2 + port */
			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
2330
/* Refresh the cached link state from the link layer and re-report it.
 * No-op unless the device is fully up (BNX2X_STATE_OPEN).  The stats
 * state machine is kicked before reporting so counters match the new
 * link state.
 */
static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}
2346
/* Take over the Port Management Function (PMF) role for this function:
 * mark ourselves PMF, open the NIG attention in the HC edge registers
 * for our VN, and notify the statistics state machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention: 0xff0f plus the bit for this VN
	 * (bits 4..7 select the VN) */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2362
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002363/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002364
2365/* slow path */
2366
2367/*
2368 * General service functions
2369 */
2370
/* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post one slow-path queue entry (ramrod).
 *
 * @bp:      driver instance
 * @command: ramrod command id (placed in the SPE header)
 * @cid:     connection id the ramrod applies to
 * @data_hi/@data_lo: command data (e.g. a DMA address), written in LE
 * @common:  non-zero marks the entry as a common (not per-connection)
 *           ramrod via SPE_HDR_COMMON_RAMROD
 *
 * Returns 0 on success, -EBUSY when the SPQ ring is full (also panics),
 * -EIO when the driver has already panicked (BNX2X_STOP_ON_ERROR).
 * The producer update and ring bookkeeping are serialized by
 * bp->spq_lock (BH-disabling spinlock).
 */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	/* advance the producer, wrapping at the last BD */
	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	/* publish the new producer index to the XSTORM */
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2427
2428/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002429static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002430{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002431 u32 i, j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002432 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002433
2434 might_sleep();
2435 i = 100;
2436 for (j = 0; j < i*10; j++) {
2437 val = (1UL << 31);
2438 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2439 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2440 if (val & (1L << 31))
2441 break;
2442
2443 msleep(5);
2444 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002445 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002446 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002447 rc = -EBUSY;
2448 }
2449
2450 return rc;
2451}
2452
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002453/* release split MCP access lock register */
2454static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002455{
2456 u32 val = 0;
2457
2458 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2459}
2460
2461static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2462{
2463 struct host_def_status_block *def_sb = bp->def_status_blk;
2464 u16 rc = 0;
2465
2466 barrier(); /* status block is written to by the chip */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002467 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2468 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2469 rc |= 1;
2470 }
2471 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2472 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2473 rc |= 2;
2474 }
2475 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2476 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2477 rc |= 4;
2478 }
2479 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2480 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2481 rc |= 8;
2482 }
2483 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2484 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2485 rc |= 16;
2486 }
2487 return rc;
2488}
2489
2490/*
2491 * slow path service functions
2492 */
2493
/* Handle newly asserted attention bits.
 *
 * Sequence: mask the asserted lines in the AEU (under the per-port HW
 * lock), record them in bp->attn_state, service the hard-wired sources
 * (NIG/link change, GPIOs, general attentions), then acknowledge the
 * bits to the IGU through the HC command register.  When a NIG (link)
 * attention was handled, the saved NIG interrupt mask is restored only
 * after that acknowledge, while still holding the PHY lock.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	/* a bit can not be newly asserted if we already saw it asserted */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* mask the asserted lines in the AEU under the HW lock (shared
	 * with the MCP / other port) */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask and disable NIG
			 * interrupts while the link change is handled */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1;
		 * clear the corresponding AEU general attention register */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	/* acknowledge the asserted bits to the IGU */
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2589
/* Service deasserted attention bits routed through AEU register 1:
 * SPIO5 (fan failure on SFX7101-based boards), GPIO3 rising on either
 * function (module detect), and the fatal HW-block attentions of set 0.
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* disable the SPIO5 attention so it does not fire again */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure in the persistent PHY config
			 * kept in shared memory */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
					external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	/* GPIO3 on either function signals an optics-module event */
	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the offending block attention, then panic */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
2657
/* Service deasserted attention bits of group 1: doorbell queue (DORQ)
 * interrupts and the fatal HW-block attentions of set 1.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* reading the status register also clears it */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the offending block attention, then panic */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
2688
/* Service deasserted attention bits of group 2: CFC and PXP hardware
 * interrupts and the fatal HW-block attentions of set 2.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		/* reading the status register also clears it */
		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the offending block attention, then panic */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
2728
/* Service deasserted attention bits of group 3: general attentions
 * (PMF hand-over / link sync, MC and MCP asserts) and latched
 * attentions (GRC timeout / reserved access).
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			/* another function signalled a link change or a
			 * PMF hand-over; refresh our link status and take
			 * the PMF role if the MCP assigned it to us */
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			/* a storm processor asserted - fatal */
			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* the detail registers exist only on E1H */
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* clear all latched attentions */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
2778
/* Handle newly deasserted attention bits.
 *
 * Reads the four after-invert AEU registers under the ALR (the MCP or
 * the other port may be handling the same event), dispatches the bits
 * belonging to each deasserted dynamic attention group to the
 * per-group handlers, acknowledges the bits to the IGU, and finally
 * re-opens the AEU mask for those lines under the per-port HW lock.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	/* acknowledge the deasserted bits to the IGU */
	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	/* a bit can not deassert unless we saw it asserted before */
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* re-enable the deasserted lines in the AEU mask */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
2857
/* Top-level attention handler: compare the attention bits and their
 * acknowledge bits (from the default status block) against the cached
 * attn_state to find which lines were newly asserted or deasserted,
 * then dispatch each set - assertions first.
 */
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	/* a bit whose level and ack disagree must match our cached state;
	 * anything else means we lost track of an attention transition */
	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
2885
/* Deferred slow-path work (queued from the slow-path ISR).
 *
 * Refreshes the default-status-block indices, runs the attention
 * handler if the attention index changed, then acknowledges every
 * default status block section; only the last ack (TSTORM) re-enables
 * the IGU interrupt.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;


	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* ack all sections; IGU_INT_ENABLE only on the final ack */
	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}
2920
/* MSI-X slow-path interrupt handler: disable further slow-path
 * interrupts via an IGU ack and defer the real work to bnx2x_sp_task
 * on the driver workqueue.
 */
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* mask the slow-path interrupt until the work item re-enables it */
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
2943
2944/* end of slow path */
2945
2946/* Statistics */
2947
2948/****************************************************************************
2949* Macros
2950****************************************************************************/
2951
/* sum[hi:lo] += add[hi:lo]
 * 64-bit addition performed on two 32-bit halves: the low halves are
 * added first and a carry is propagated into the high half whenever
 * the low sum wrapped around.  Arguments must be plain lvalues /
 * side-effect-free expressions (s_hi and a_hi must not alias).
 */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		if (s_lo < a_lo) /* low half wrapped -> carry */ \
			s_hi++; \
		s_hi += a_hi; \
	} while (0)
2958
/* difference = minuend - subtrahend  (64-bit subtract on u32 halves)
 *
 * NOTE(review): the two underflow paths are not symmetric - when
 * m_hi < s_hi with m_lo >= s_lo the result is clamped to zero, but
 * when m_lo < s_lo with m_hi < s_hi the unsigned d_hi = m_hi - s_hi
 * wraps to a non-zero value, so the modular (wrapped) difference is
 * produced instead of a clamp.  Confirm minuend >= subtrahend holds at
 * all call sites (stats counters are expected to be monotonic).
 */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* low half underflows - try to borrow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi: clamp to zero */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				/* whole value underflows: clamp */ \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
2986
/* Fold MAC counter 's' (hi/lo pair) into port statistic 't'.
 *
 * Expects the expansion site to provide locals named 'new' (fresh MAC
 * stats), 'pstats' (host_port_stats) and 'diff' (a struct with u32
 * hi/lo members).  mac_stx[0] holds the previous raw snapshot used to
 * compute the delta; mac_stx[1] is the running accumulated total.
 */
#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)
2996
/* Fold NIG counter 's' (hi/lo pair) into ethernet statistic 't'.
 *
 * Expects locals named 'new', 'old' (nig_stats snapshots), 'estats'
 * (bnx2x_eth_stats) and 'diff' (struct with u32 hi/lo) at the
 * expansion site; adds the delta since the last snapshot to estats.
 */
#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)
3004
/* sum[hi:lo] += add
 *
 * Accumulate a 32-bit value into a 64-bit sum kept as two 32-bit
 * halves; a wrap-around of the low word carries one into the high
 * word.  Arguments must be side-effect-free lvalues.
 */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		if (s_lo < a) \
			s_hi++; \
	} while (0)
3011
/* Accumulate a raw 32-bit MAC counter 's' into the 64-bit running
 * total in mac_stx[1].  Expects locals 'pstats' (host_port_stats) and
 * 'new' (fresh MAC stats) at the expansion site.
 */
#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)
3018
/* Fold tstorm per-client counter 's' into queue statistic 't'.
 *
 * Expects locals 'tclient', 'old_tclient', 'qstats' and a plain u32
 * 'diff' at the expansion site.  The firmware counters are
 * little-endian, hence the le32_to_cpu before computing the delta;
 * unsigned arithmetic makes the delta correct across wrap-around.
 */
#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3025
/* Fold ustorm per-client counter 's' into queue statistic 't'.
 * Same contract as UPDATE_EXTEND_TSTAT but for the 'uclient' /
 * 'old_uclient' locals.
 */
#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3032
/* Fold xstorm per-client counter 's' into queue statistic 't'.
 * Same contract as UPDATE_EXTEND_TSTAT but for the 'xclient' /
 * 'old_xclient' locals.
 */
#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3039
/* minuend -= subtrahend
 * In-place 64-bit subtraction on split halves; inherits DIFF_64's
 * clamp-to-zero behavior on underflow.
 */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)
3045
/* minuend[hi:lo] -= subtrahend
 * Subtract a 32-bit value from a 64-bit quantity kept as two halves
 * (high half of the subtrahend is zero).
 */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)
3051
/* Subtract the delta of ustorm counter 's' from queue statistic 't'.
 * Unlike UPDATE_EXTEND_USTAT this does NOT refresh old_uclient->s;
 * expects locals 'uclient', 'old_uclient', 'qstats' and u32 'diff'.
 */
#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3057
3058/*
3059 * General service functions
3060 */
3061
3062static inline long bnx2x_hilo(u32 *hiref)
3063{
3064 u32 lo = *(hiref + 1);
3065#if (BITS_PER_LONG == 64)
3066 u32 hi = *hiref;
3067
3068 return HILO_U64(hi, lo);
3069#else
3070 return lo;
3071#endif
3072}
3073
3074/*
3075 * Init service functions
3076 */
3077
/* Post a statistics-query ramrod to the firmware.
 *
 * A no-op while a previous query is still pending (bp->stats_pending).
 * The ramrod carries the incremented driver counter, whether port-wide
 * collection is requested (only the PMF asks for it) and a bitmask of
 * client IDs to collect for.
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		/* one bit per queue's client id */
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		/* ramrod_data is passed as two raw u32 words (hi, lo) */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003099
/* Initialize the statistics machinery for this function.
 *
 * Resets the bookkeeping counters, captures the shared-memory location
 * of the port statistics (when management firmware is present), takes
 * a baseline snapshot of the NIG hardware counters, clears all
 * per-queue "old" firmware snapshots and the accumulated totals, and
 * leaves the state machine DISABLED.  On an E1H multi-function PMF a
 * PMF event is raised so port statistics start immediately.
 */
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	/* baseline the NIG counters so later updates report deltas only */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
3146
/* Kick off the queued DMAE statistics transfers.
 *
 * When DMAE commands were queued (bp->executer_idx != 0), builds a
 * "loader" command that DMAs the first queued command into the DMAE
 * command memory and chains the rest via their completion registers.
 * Otherwise, if only function stats exist, posts the single standing
 * command directly.  Completion is signalled by stats_comp reaching
 * DMAE_COMP_VAL (polled by bnx2x_stats_comp).  Skipped entirely on
 * slow (emulation/FPGA) chip revisions.
 */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* pre-mark complete so a skipped post doesn't hang the poller */
	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		/* E1 takes one dword less -- NOTE(review): presumably the
		 * E1 DMAE command is shorter; confirm against chip docs */
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
3194
/* Wait for the DMAE statistics transfer to complete.
 *
 * Polls the stats_comp word for DMAE_COMP_VAL, sleeping 1 ms between
 * checks, for at most ~10 ms; logs an error on timeout but does not
 * propagate it.  Always returns 1.  Must be called from sleepable
 * context (enforced by might_sleep()).
 */
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}
3211
3212/*
3213 * Statistics service functions
3214 */
3215
/* Pull the current port statistics from shared memory into the driver.
 *
 * Used when this function becomes the PMF on an E1H multi-function
 * device: the port statistics accumulated by the previous PMF are read
 * back (via two chained DMAE GRC->PCI commands, because a single DMAE
 * read is limited to DMAE_LEN32_RD_MAX dwords) so accumulation can
 * continue seamlessly.  Synchronous: posts the commands and waits for
 * completion before returning.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	/* first chunk: up to DMAE_LEN32_RD_MAX dwords, chained via GRC */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second chunk: the remainder, completing to the PCI stats_comp */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3270
/* Queue the full set of per-port statistics DMAE commands (PMF only).
 *
 * Builds, in bp's slow-path DMAE array, the commands that:
 *   - push the host port/function stats up to management firmware
 *     shared memory (PCI -> GRC);
 *   - pull the active MAC's hardware counters (BMAC or EMAC, depending
 *     on the current link MAC type) into the host mac_stats buffer
 *     (GRC -> PCI);
 *   - pull the NIG counters into the host nig_stats buffer.
 * The last command completes to stats_comp; the earlier ones chain
 * through the DMAE "go" registers.  Commands are only queued here --
 * bnx2x_hw_stats_post() actually starts them.
 */
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	/* skip the trailing 2 egress hi/lo pairs; read separately below */
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* final command: completes to stats_comp instead of chaining */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
3478
/* Prepare the standing DMAE command that pushes the host function
 * statistics to the management firmware area (bp->func_stx).
 *
 * Used by non-PMF functions (no port stats to collect); the command is
 * built once in bp->stats_dmae and posted by bnx2x_hw_stats_post().
 * Completion is reported directly to the stats_comp word.
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
3514
/* Start a statistics collection cycle.
 *
 * The PMF sets up the full per-port command chain; otherwise only the
 * function-stats command is prepared.  Then both the hardware (DMAE)
 * transfer and the firmware (storm) query are kicked off.
 */
static void bnx2x_stats_start(struct bnx2x *bp)
{
	/* PMF takes precedence: it owns the port-wide statistics */
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
3526
/* Handle becoming PMF: wait out any in-flight DMAE transfer, import
 * the port statistics left by the previous PMF, then (re)start the
 * normal collection cycle.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
3533
/* Restart statistics collection: wait for any outstanding DMAE
 * completion, then start a fresh cycle.
 */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003539
/* Fold the freshly-DMAed BMAC hardware counters into the accumulated
 * port statistics (mac_stx[1]) via UPDATE_STAT64, and derive the
 * pause-frame totals exposed in bp->eth_stats.
 *
 * The locals 'new', 'pstats', 'estats' and 'diff' are required by the
 * UPDATE_STAT64 macro expansion.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	/* grxpf feeds two counters: xoff-state entries and pause frames */
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
3590
/* Fold the freshly-DMAed EMAC hardware counters into the accumulated
 * port statistics (mac_stx[1]) via UPDATE_EXTEND_STAT, and derive the
 * pause-frame totals (xon + xoff) exposed in bp->eth_stats.
 *
 * The locals 'new' and 'pstats' are required by the
 * UPDATE_EXTEND_STAT macro expansion.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause frames received = xon + xoff pause frames */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause frames sent = xon + xoff sent */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
3647
/* Process a completed hardware statistics DMAE transfer.
 *
 * Dispatches to the BMAC or EMAC update routine depending on the
 * active MAC, folds the NIG counter deltas into the port/ethernet
 * stats, refreshes the NIG baseline snapshot, copies the accumulated
 * MAC totals into bp->eth_stats, and bumps the host_port_stats
 * start/end sequence pair.  Also reports a changed NIG timer maximum
 * read from shared memory.
 *
 * Returns 0 on success, -1 if no MAC is active (should not happen).
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch hi/lo pair consumed by UPDATE_STAT64_NIG */
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	/* NIG counters are 32-bit; accumulate their deltas as 64-bit */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	/* new readings become the baseline for the next update */
	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
3697
/* Fold the firmware ("storm") per-client statistics, delivered into the
 * slowpath fw_stats buffer, into the per-queue (qstats), per-function
 * (fstats) and driver-wide (estats) counters.
 *
 * Returns 0 on success, or a negative value (-1/-2/-4 for x/t/u-storm
 * respectively) when a storm has not yet stamped the current
 * bp->stats_counter cycle; the caller treats any non-zero return as
 * "stats not ready yet" and retries on the next tick.
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	/* restart the function-level accumulation from scratch; the last
	 * 2*sizeof(u32) bytes are deliberately preserved (presumably the
	 * host_func_stats_start/end markers updated at the bottom) */
	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	/* these driver-wide counters are re-summed per queue below */
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		/* scratch consumed by the UPDATE_/SUB_EXTEND_* macros below */
		u32 diff;

		/* are storm stats valid?
		 * each storm stamps stats_counter when it finishes writing
		 * its block; counter+1 must match the driver's cycle count */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		/* "total" starts from the valid byte count and has the
		 * error bytes added back in just below */
		qstats->total_bytes_received_hi =
		qstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
		qstats->total_bytes_received_lo =
		qstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		/* extend the 32-bit tstorm rx counters into 64-bit qstats */
		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		/* packets the ustorm dropped for lack of buffers were counted
		 * as received above - subtract them, then account them as
		 * no_buff discards instead */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		/* extend the 32-bit xstorm tx counters into 64-bit qstats */
		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		/* remember raw discard counters for the next delta cycle */
		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		/* roll this queue's totals into the function aggregates */
		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		/* and into the driver-wide aggregates zeroed above */
		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	/* bad octets seen by the MAC are part of the total byte count */
	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* publish the function snapshot into the driver-wide stats (the
	 * leading part of bnx2x_eth_stats mirrors host_func_stats) */
	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	/* merge MAC-level error counters into the firmware-derived ones */
	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* port-level tstorm discards are only owned by the PMF */
	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	/* start==end marks the snapshot as consistent for readers */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
3888
3889static void bnx2x_net_stats_update(struct bnx2x *bp)
3890{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003891 struct bnx2x_eth_stats *estats = &bp->eth_stats;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003892 struct net_device_stats *nstats = &bp->dev->stats;
Eilon Greensteinde832a52009-02-12 08:36:33 +00003893 int i;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003894
3895 nstats->rx_packets =
3896 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3897 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3898 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3899
3900 nstats->tx_packets =
3901 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3902 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3903 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3904
Eilon Greensteinde832a52009-02-12 08:36:33 +00003905 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003906
Eliezer Tamir0e39e642008-02-28 11:54:03 -08003907 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003908
Eilon Greensteinde832a52009-02-12 08:36:33 +00003909 nstats->rx_dropped = estats->mac_discard;
3910 for_each_queue(bp, i)
3911 nstats->rx_dropped +=
3912 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3913
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003914 nstats->tx_dropped = 0;
3915
3916 nstats->multicast =
Eilon Greensteinde832a52009-02-12 08:36:33 +00003917 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003918
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003919 nstats->collisions =
Eilon Greensteinde832a52009-02-12 08:36:33 +00003920 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003921
3922 nstats->rx_length_errors =
Eilon Greensteinde832a52009-02-12 08:36:33 +00003923 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3924 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3925 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3926 bnx2x_hilo(&estats->brb_truncate_hi);
3927 nstats->rx_crc_errors =
3928 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3929 nstats->rx_frame_errors =
3930 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3931 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003932 nstats->rx_missed_errors = estats->xxoverflow_discard;
3933
3934 nstats->rx_errors = nstats->rx_length_errors +
3935 nstats->rx_over_errors +
3936 nstats->rx_crc_errors +
3937 nstats->rx_frame_errors +
Eliezer Tamir0e39e642008-02-28 11:54:03 -08003938 nstats->rx_fifo_errors +
3939 nstats->rx_missed_errors;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003940
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003941 nstats->tx_aborted_errors =
Eilon Greensteinde832a52009-02-12 08:36:33 +00003942 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3943 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3944 nstats->tx_carrier_errors =
3945 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003946 nstats->tx_fifo_errors = 0;
3947 nstats->tx_heartbeat_errors = 0;
3948 nstats->tx_window_errors = 0;
3949
3950 nstats->tx_errors = nstats->tx_aborted_errors +
Eilon Greensteinde832a52009-02-12 08:36:33 +00003951 nstats->tx_carrier_errors +
3952 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3953}
3954
3955static void bnx2x_drv_stats_update(struct bnx2x *bp)
3956{
3957 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3958 int i;
3959
3960 estats->driver_xoff = 0;
3961 estats->rx_err_discard_pkt = 0;
3962 estats->rx_skb_alloc_failed = 0;
3963 estats->hw_csum_err = 0;
3964 for_each_queue(bp, i) {
3965 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3966
3967 estats->driver_xoff += qstats->driver_xoff;
3968 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3969 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3970 estats->hw_csum_err += qstats->hw_csum_err;
3971 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003972}
3973
/* Periodic statistics refresh (ENABLED/UPDATE entry of the stats state
 * machine): pull the hardware and storm counters into the driver stats,
 * optionally dump a debug snapshot, then post the next collection cycle.
 * Panics if the storms fail to deliver stats for several ticks in a row.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* the previous DMAE collection has not completed yet - skip */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	/* only the PMF owns the port-level hardware statistics */
	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* storm stats not ready: tolerate 3 consecutive misses, then
	 * treat it as a firmware failure */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* verbose per-tick snapshot, gated by NETIF_MSG_TIMER */
	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		/* queue 0 ring state */
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
		       " tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
		       " rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
		       "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		/* discard breakdown (queue 0 tstorm + port-level) */
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %lu no_buff_discard %lu "
			"mac_discard %u mac_filter_discard %u "
			"xxovrflow_discard %u brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	/* kick off the next hardware and storm collection cycle */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004039
/* Build the DMAE command(s) that write the final host port/function
 * statistics snapshots back to the device (port_stx / func_stx GRC
 * addresses) when statistics collection is being stopped.
 *
 * When both a port and a function block must be written, the first DMAE
 * completes into the GRC "go" register of the loader channel so the
 * second command is chained; only the last command completes into the
 * host stats_comp word (DMAE_COMP_VAL), which the caller polls.
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* common opcode: copy from host PCI memory to device GRC space */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* if a function block follows, chain via GRC completion;
		 * otherwise complete directly into host stats_comp */
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		/* function stats: always the last command, completes into
		 * the host stats_comp word */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
4103
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004104static void bnx2x_stats_stop(struct bnx2x *bp)
4105{
4106 int update = 0;
4107
4108 bnx2x_stats_comp(bp);
4109
4110 if (bp->port.pmf)
4111 update = (bnx2x_hw_stats_update(bp) == 0);
4112
4113 update |= (bnx2x_storm_stats_update(bp) == 0);
4114
4115 if (update) {
4116 bnx2x_net_stats_update(bp);
4117
4118 if (bp->port.pmf)
4119 bnx2x_port_stats_stop(bp);
4120
4121 bnx2x_hw_stats_post(bp);
4122 bnx2x_stats_comp(bp);
4123 }
4124}
4125
/* No-op handler for state/event pairs in bnx2x_stats_stm that require
 * no statistics work */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
4129
/* Statistics state machine transition table, indexed by
 * [current state][event]: each entry names the handler to invoke and
 * the state to enter afterwards (see bnx2x_stats_handle()). */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
4148
4149static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4150{
4151 enum bnx2x_stats_state state = bp->stats_state;
4152
4153 bnx2x_stats_stm[state][event].action(bp);
4154 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4155
4156 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4157 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4158 state, event, bp->stats_state);
4159}
4160
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004161static void bnx2x_timer(unsigned long data)
4162{
4163 struct bnx2x *bp = (struct bnx2x *) data;
4164
4165 if (!netif_running(bp->dev))
4166 return;
4167
4168 if (atomic_read(&bp->intr_sem) != 0)
Eliezer Tamirf1410642008-02-28 11:51:50 -08004169 goto timer_restart;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004170
4171 if (poll) {
4172 struct bnx2x_fastpath *fp = &bp->fp[0];
4173 int rc;
4174
4175 bnx2x_tx_int(fp, 1000);
4176 rc = bnx2x_rx_int(fp, 1000);
4177 }
4178
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004179 if (!BP_NOMCP(bp)) {
4180 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004181 u32 drv_pulse;
4182 u32 mcp_pulse;
4183
4184 ++bp->fw_drv_pulse_wr_seq;
4185 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4186 /* TBD - add SYSTEM_TIME */
4187 drv_pulse = bp->fw_drv_pulse_wr_seq;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004188 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004189
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004190 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004191 MCP_PULSE_SEQ_MASK);
4192 /* The delta between driver pulse and mcp response
4193 * should be 1 (before mcp response) or 0 (after mcp response)
4194 */
4195 if ((drv_pulse != mcp_pulse) &&
4196 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4197 /* someone lost a heartbeat... */
4198 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4199 drv_pulse, mcp_pulse);
4200 }
4201 }
4202
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004203 if ((bp->state == BNX2X_STATE_OPEN) ||
4204 (bp->state == BNX2X_STATE_DISABLED))
4205 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004206
Eliezer Tamirf1410642008-02-28 11:51:50 -08004207timer_restart:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004208 mod_timer(&bp->timer, jiffies + bp->current_interval);
4209}
4210
4211/* end of Statistics */
4212
4213/* nic init */
4214
4215/*
4216 * nic init service functions
4217 */
4218
/* Zero the ustorm and cstorm host status block areas of status block
 * @sb_id for this port in the chip's internal memory. */
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}
4230
/* Initialize a non-default (fastpath) host status block: program the
 * ustorm/cstorm sections with the block's DMA address and owning
 * function, disable host coalescing for every index, then ack the
 * status block to enable IGU interrupts for it.
 *
 * @sb:      host-side status block to initialize
 * @mapping: DMA address of @sb as seen by the device
 * @sb_id:   hardware status block id
 */
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	/* 64-bit DMA address is written as two 32-bit halves */
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	/* start with host coalescing disabled on every ustorm index */
	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* enable interrupts for this status block */
	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4275
/* Zero this function's default status block areas in all four storm
 * internal memories (ustorm, cstorm, xstorm, tstorm). */
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}
4293
4294static void bnx2x_init_def_sb(struct bnx2x *bp,
4295 struct host_def_status_block *def_sb,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004296 dma_addr_t mapping, int sb_id)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004297{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004298 int port = BP_PORT(bp);
4299 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004300 int index, val, reg_offset;
4301 u64 section;
4302
4303 /* ATTN */
4304 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4305 atten_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004306 def_sb->atten_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004307
Eliezer Tamir49d66772008-02-28 11:53:13 -08004308 bp->attn_state = 0;
4309
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004310 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4311 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4312
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004313 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004314 bp->attn_group[index].sig[0] = REG_RD(bp,
4315 reg_offset + 0x10*index);
4316 bp->attn_group[index].sig[1] = REG_RD(bp,
4317 reg_offset + 0x4 + 0x10*index);
4318 bp->attn_group[index].sig[2] = REG_RD(bp,
4319 reg_offset + 0x8 + 0x10*index);
4320 bp->attn_group[index].sig[3] = REG_RD(bp,
4321 reg_offset + 0xc + 0x10*index);
4322 }
4323
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004324 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4325 HC_REG_ATTN_MSG0_ADDR_L);
4326
4327 REG_WR(bp, reg_offset, U64_LO(section));
4328 REG_WR(bp, reg_offset + 4, U64_HI(section));
4329
4330 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4331
4332 val = REG_RD(bp, reg_offset);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004333 val |= sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004334 REG_WR(bp, reg_offset, val);
4335
4336 /* USTORM */
4337 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4338 u_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004339 def_sb->u_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004340
4341 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004342 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004343 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004344 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004345 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07004346 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004347 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004348
4349 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4350 REG_WR16(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004351 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004352
4353 /* CSTORM */
4354 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4355 c_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004356 def_sb->c_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004357
4358 REG_WR(bp, BAR_CSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004359 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004360 REG_WR(bp, BAR_CSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004361 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004362 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07004363 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004364 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004365
4366 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4367 REG_WR16(bp, BAR_CSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004368 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004369
4370 /* TSTORM */
4371 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4372 t_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004373 def_sb->t_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004374
4375 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004376 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004377 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004378 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004379 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07004380 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004381 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004382
4383 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4384 REG_WR16(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004385 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004386
4387 /* XSTORM */
4388 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4389 x_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004390 def_sb->x_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004391
4392 REG_WR(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004393 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004394 REG_WR(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004395 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004396 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07004397 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004398 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004399
4400 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4401 REG_WR16(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004402 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004403
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004404 bp->stats_pending = 0;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004405 bp->set_mac_pending = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004406
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004407 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004408}
4409
4410static void bnx2x_update_coalesce(struct bnx2x *bp)
4411{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004412 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004413 int i;
4414
4415 for_each_queue(bp, i) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004416 int sb_id = bp->fp[i].sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004417
4418 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4419 REG_WR8(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004420 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
Eilon Greenstein5c862842008-08-13 15:51:48 -07004421 U_SB_ETH_RX_CQ_INDEX),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004422 bp->rx_ticks/12);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004423 REG_WR16(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004424 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
Eilon Greenstein5c862842008-08-13 15:51:48 -07004425 U_SB_ETH_RX_CQ_INDEX),
4426 bp->rx_ticks ? 0 : 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004427
4428 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4429 REG_WR8(bp, BAR_CSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004430 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
Eilon Greenstein5c862842008-08-13 15:51:48 -07004431 C_SB_ETH_TX_CQ_INDEX),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004432 bp->tx_ticks/12);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004433 REG_WR16(bp, BAR_CSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004434 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
Eilon Greenstein5c862842008-08-13 15:51:48 -07004435 C_SB_ETH_TX_CQ_INDEX),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004436 bp->tx_ticks ? 0 : 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004437 }
4438}
4439
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004440static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4441 struct bnx2x_fastpath *fp, int last)
4442{
4443 int i;
4444
4445 for (i = 0; i < last; i++) {
4446 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4447 struct sk_buff *skb = rx_buf->skb;
4448
4449 if (skb == NULL) {
4450 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4451 continue;
4452 }
4453
4454 if (fp->tpa_state[i] == BNX2X_TPA_START)
4455 pci_unmap_single(bp->pdev,
4456 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07004457 bp->rx_buf_size,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004458 PCI_DMA_FROMDEVICE);
4459
4460 dev_kfree_skb(skb);
4461 rx_buf->skb = NULL;
4462 }
4463}
4464
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004465static void bnx2x_init_rx_rings(struct bnx2x *bp)
4466{
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004467 int func = BP_FUNC(bp);
Eilon Greenstein32626232008-08-13 15:51:07 -07004468 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4469 ETH_MAX_AGGREGATION_QUEUES_E1H;
4470 u16 ring_prod, cqe_ring_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004471 int i, j;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004472
Eilon Greenstein87942b42009-02-12 08:36:49 +00004473 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
Eilon Greenstein0f008462009-02-12 08:36:18 +00004474 DP(NETIF_MSG_IFUP,
4475 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004476
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004477 if (bp->flags & TPA_ENABLE_FLAG) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004478
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004479 for_each_rx_queue(bp, j) {
Eilon Greenstein32626232008-08-13 15:51:07 -07004480 struct bnx2x_fastpath *fp = &bp->fp[j];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004481
Eilon Greenstein32626232008-08-13 15:51:07 -07004482 for (i = 0; i < max_agg_queues; i++) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004483 fp->tpa_pool[i].skb =
4484 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4485 if (!fp->tpa_pool[i].skb) {
4486 BNX2X_ERR("Failed to allocate TPA "
4487 "skb pool for queue[%d] - "
4488 "disabling TPA on this "
4489 "queue!\n", j);
4490 bnx2x_free_tpa_pool(bp, fp, i);
4491 fp->disable_tpa = 1;
4492 break;
4493 }
4494 pci_unmap_addr_set((struct sw_rx_bd *)
4495 &bp->fp->tpa_pool[i],
4496 mapping, 0);
4497 fp->tpa_state[i] = BNX2X_TPA_STOP;
4498 }
4499 }
4500 }
4501
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004502 for_each_rx_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004503 struct bnx2x_fastpath *fp = &bp->fp[j];
4504
4505 fp->rx_bd_cons = 0;
4506 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004507 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004508
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004509 /* "next page" elements initialization */
4510 /* SGE ring */
4511 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4512 struct eth_rx_sge *sge;
4513
4514 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4515 sge->addr_hi =
4516 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4517 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4518 sge->addr_lo =
4519 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4520 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4521 }
4522
4523 bnx2x_init_sge_ring_bit_mask(fp);
4524
4525 /* RX BD ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004526 for (i = 1; i <= NUM_RX_RINGS; i++) {
4527 struct eth_rx_bd *rx_bd;
4528
4529 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4530 rx_bd->addr_hi =
4531 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004532 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004533 rx_bd->addr_lo =
4534 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004535 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004536 }
4537
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004538 /* CQ ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004539 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4540 struct eth_rx_cqe_next_page *nextpg;
4541
4542 nextpg = (struct eth_rx_cqe_next_page *)
4543 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4544 nextpg->addr_hi =
4545 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004546 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004547 nextpg->addr_lo =
4548 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004549 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004550 }
4551
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004552 /* Allocate SGEs and initialize the ring elements */
4553 for (i = 0, ring_prod = 0;
4554 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004555
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004556 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4557 BNX2X_ERR("was only able to allocate "
4558 "%d rx sges\n", i);
4559 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4560 /* Cleanup already allocated elements */
4561 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
Eilon Greenstein32626232008-08-13 15:51:07 -07004562 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004563 fp->disable_tpa = 1;
4564 ring_prod = 0;
4565 break;
4566 }
4567 ring_prod = NEXT_SGE_IDX(ring_prod);
4568 }
4569 fp->rx_sge_prod = ring_prod;
4570
4571 /* Allocate BDs and initialize BD ring */
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004572 fp->rx_comp_cons = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004573 cqe_ring_prod = ring_prod = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004574 for (i = 0; i < bp->rx_ring_size; i++) {
4575 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4576 BNX2X_ERR("was only able to allocate "
Eilon Greensteinde832a52009-02-12 08:36:33 +00004577 "%d rx skbs on queue[%d]\n", i, j);
4578 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004579 break;
4580 }
4581 ring_prod = NEXT_RX_IDX(ring_prod);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004582 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
Ilpo Järvinen53e5e962008-07-25 21:40:45 -07004583 WARN_ON(ring_prod <= i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004584 }
4585
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004586 fp->rx_bd_prod = ring_prod;
4587 /* must not have more available CQEs than BDs */
4588 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4589 cqe_ring_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004590 fp->rx_pkt = fp->rx_calls = 0;
4591
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004592 /* Warning!
4593 * this will generate an interrupt (to the TSTORM)
4594 * must only be done after chip is initialized
4595 */
4596 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4597 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004598 if (j != 0)
4599 continue;
4600
4601 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004602 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004603 U64_LO(fp->rx_comp_mapping));
4604 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004605 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004606 U64_HI(fp->rx_comp_mapping));
4607 }
4608}
4609
4610static void bnx2x_init_tx_ring(struct bnx2x *bp)
4611{
4612 int i, j;
4613
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004614 for_each_tx_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004615 struct bnx2x_fastpath *fp = &bp->fp[j];
4616
4617 for (i = 1; i <= NUM_TX_RINGS; i++) {
4618 struct eth_tx_bd *tx_bd =
4619 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4620
4621 tx_bd->addr_hi =
4622 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004623 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004624 tx_bd->addr_lo =
4625 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004626 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004627 }
4628
4629 fp->tx_pkt_prod = 0;
4630 fp->tx_pkt_cons = 0;
4631 fp->tx_bd_prod = 0;
4632 fp->tx_bd_cons = 0;
4633 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4634 fp->tx_pkt = 0;
4635 }
4636}
4637
4638static void bnx2x_init_sp_ring(struct bnx2x *bp)
4639{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004640 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004641
4642 spin_lock_init(&bp->spq_lock);
4643
4644 bp->spq_left = MAX_SPQ_PENDING;
4645 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004646 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4647 bp->spq_prod_bd = bp->spq;
4648 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4649
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004650 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004651 U64_LO(bp->spq_mapping));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004652 REG_WR(bp,
4653 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004654 U64_HI(bp->spq_mapping));
4655
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004656 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004657 bp->spq_prod_idx);
4658}
4659
/* Fill in the per-connection ETH context for every queue: the USTORM
 * (Rx) side gets the status-block/client IDs, buffer sizes and BD/SGE
 * ring bases; the XSTORM (Tx) side gets the Tx BD ring base and
 * doorbell data address; the CSTORM side gets the Tx CQ index binding.
 * CDU validation words are written for both the UCM and XCM regions.
 */
static void bnx2x_init_context(struct bnx2x *bp)
{
        int i;

        for_each_queue(bp, i) {
                struct eth_context *context = bnx2x_sp(bp, context[i].eth);
                struct bnx2x_fastpath *fp = &bp->fp[i];
                u8 cl_id = fp->cl_id;
                u8 sb_id = fp->sb_id;

                /* Rx (USTORM) side of the connection context */
                context->ustorm_st_context.common.sb_index_numbers =
                                                BNX2X_RX_SB_INDEX_NUM;
                context->ustorm_st_context.common.clientId = cl_id;
                context->ustorm_st_context.common.status_block_id = sb_id;
                context->ustorm_st_context.common.flags =
                        (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
                context->ustorm_st_context.common.statistics_counter_id =
                                                cl_id;
                context->ustorm_st_context.common.mc_alignment_log_size =
                                                BNX2X_RX_ALIGN_SHIFT;
                context->ustorm_st_context.common.bd_buff_size =
                                                bp->rx_buf_size;
                context->ustorm_st_context.common.bd_page_base_hi =
                                                U64_HI(fp->rx_desc_mapping);
                context->ustorm_st_context.common.bd_page_base_lo =
                                                U64_LO(fp->rx_desc_mapping);
                if (!fp->disable_tpa) {
                        /* TPA active on this queue: enable the SGE ring and
                         * cap the SGE buffer size at 16 bits */
                        context->ustorm_st_context.common.flags |=
                                (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
                        context->ustorm_st_context.common.sge_buff_size =
                                (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
                                         (u32)0xffff);
                        context->ustorm_st_context.common.sge_page_base_hi =
                                                U64_HI(fp->rx_sge_mapping);
                        context->ustorm_st_context.common.sge_page_base_lo =
                                                U64_LO(fp->rx_sge_mapping);
                }

                /* CDU validation word for the UCM aggregation region */
                context->ustorm_ag_context.cdu_usage =
                        CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
                                               CDU_REGION_NUMBER_UCM_AG,
                                               ETH_CONNECTION_TYPE);

                /* Tx (XSTORM) side: BD ring base and doorbell data address */
                context->xstorm_st_context.tx_bd_page_base_hi =
                                                U64_HI(fp->tx_desc_mapping);
                context->xstorm_st_context.tx_bd_page_base_lo =
                                                U64_LO(fp->tx_desc_mapping);
                context->xstorm_st_context.db_data_addr_hi =
                                                U64_HI(fp->tx_prods_mapping);
                context->xstorm_st_context.db_data_addr_lo =
                                                U64_LO(fp->tx_prods_mapping);
                context->xstorm_st_context.statistics_data = (cl_id |
                                XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
                /* Bind the Tx CQ consumer index to this queue's status block */
                context->cstorm_st_context.sb_index_number =
                                                C_SB_ETH_TX_CQ_INDEX;
                context->cstorm_st_context.status_block_id = sb_id;

                /* CDU validation word for the XCM aggregation region */
                context->xstorm_ag_context.cdu_reserved =
                        CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
                                               CDU_REGION_NUMBER_XCM_AG,
                                               ETH_CONNECTION_TYPE);
        }
}
4725
4726static void bnx2x_init_ind_table(struct bnx2x *bp)
4727{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004728 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004729 int i;
4730
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004731 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004732 return;
4733
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004734 DP(NETIF_MSG_IFUP,
4735 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004736 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004737 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004738 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Eilon Greenstein0626b892009-02-12 08:38:14 +00004739 bp->fp->cl_id + (i % bp->num_rx_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004740}
4741
/* Write the per-client TSTORM configuration (MTU, config flags,
 * statistics counter, SGE/TPA sizing) into internal RAM for every
 * queue's client.  Called from bnx2x_set_storm_rx_mode for any Rx mode
 * other than NONE.
 */
static void bnx2x_set_client_config(struct bnx2x *bp)
{
        struct tstorm_eth_client_config tstorm_client = {0};
        int port = BP_PORT(bp);
        int i;

        tstorm_client.mtu = bp->dev->mtu;
        /* statistics and E1H outer-VLAN removal are always enabled
         * (flag names per the firmware interface) */
        tstorm_client.config_flags =
                                (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
                                 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
        /* HW VLAN stripping only when Rx is active, a VLAN group is
         * registered and the device has HW_VLAN_RX enabled */
        if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
                tstorm_client.config_flags |=
                                TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
                DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
        }
#endif

        if (bp->flags & TPA_ENABLE_FLAG) {
                /* max SGEs per packet: MTU rounded up to whole SGE pages,
                 * then rounded up to a multiple of PAGES_PER_SGE */
                tstorm_client.max_sges_for_packet =
                        SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
                tstorm_client.max_sges_for_packet =
                        ((tstorm_client.max_sges_for_packet +
                          PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
                        PAGES_PER_SGE_SHIFT;

                tstorm_client.config_flags |=
                                TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
        }

        for_each_queue(bp, i) {
                /* each client reports into its own statistics counter */
                tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

                /* the config structure is written as two 32-bit words */
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
                       ((u32 *)&tstorm_client)[0]);
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
                       ((u32 *)&tstorm_client)[1]);
        }

        DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
           ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
4786
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004787static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4788{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004789 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004790 int mode = bp->rx_mode;
4791 int mask = (1 << BP_L_ID(bp));
4792 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004793 int i;
4794
Eilon Greenstein3196a882008-08-13 15:58:49 -07004795 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004796
4797 switch (mode) {
4798 case BNX2X_RX_MODE_NONE: /* no Rx */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004799 tstorm_mac_filter.ucast_drop_all = mask;
4800 tstorm_mac_filter.mcast_drop_all = mask;
4801 tstorm_mac_filter.bcast_drop_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004802 break;
4803 case BNX2X_RX_MODE_NORMAL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004804 tstorm_mac_filter.bcast_accept_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004805 break;
4806 case BNX2X_RX_MODE_ALLMULTI:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004807 tstorm_mac_filter.mcast_accept_all = mask;
4808 tstorm_mac_filter.bcast_accept_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004809 break;
4810 case BNX2X_RX_MODE_PROMISC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004811 tstorm_mac_filter.ucast_accept_all = mask;
4812 tstorm_mac_filter.mcast_accept_all = mask;
4813 tstorm_mac_filter.bcast_accept_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004814 break;
4815 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004816 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4817 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004818 }
4819
4820 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4821 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004822 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004823 ((u32 *)&tstorm_mac_filter)[i]);
4824
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004825/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004826 ((u32 *)&tstorm_mac_filter)[i]); */
4827 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004828
Eliezer Tamir49d66772008-02-28 11:53:13 -08004829 if (mode != BNX2X_RX_MODE_NONE)
4830 bnx2x_set_client_config(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004831}
4832
Eilon Greenstein471de712008-08-13 15:49:35 -07004833static void bnx2x_init_internal_common(struct bnx2x *bp)
4834{
4835 int i;
4836
Yitchak Gertner3cdf1db2008-08-25 15:24:21 -07004837 if (bp->flags & TPA_ENABLE_FLAG) {
4838 struct tstorm_eth_tpa_exist tpa = {0};
4839
4840 tpa.tpa_exist = 1;
4841
4842 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4843 ((u32 *)&tpa)[0]);
4844 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4845 ((u32 *)&tpa)[1]);
4846 }
4847
Eilon Greenstein471de712008-08-13 15:49:35 -07004848 /* Zero this manually as its initialization is
4849 currently missing in the initTool */
4850 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4851 REG_WR(bp, BAR_USTRORM_INTMEM +
4852 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4853}
4854
4855static void bnx2x_init_internal_port(struct bnx2x *bp)
4856{
4857 int port = BP_PORT(bp);
4858
4859 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4860 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4861 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4862 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4863}
4864
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00004865/* Calculates the sum of vn_min_rates.
4866 It's needed for further normalizing of the min_rates.
4867 Returns:
4868 sum of vn_min_rates.
4869 or
4870 0 - if all the min_rates are 0.
4871 In the later case fainess algorithm should be deactivated.
4872 If not all min_rates are zero then those that are zeroes will be set to 1.
4873 */
4874static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4875{
4876 int all_zero = 1;
4877 int port = BP_PORT(bp);
4878 int vn;
4879
4880 bp->vn_weight_sum = 0;
4881 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4882 int func = 2*vn + port;
4883 u32 vn_cfg =
4884 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4885 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4886 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4887
4888 /* Skip hidden vns */
4889 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4890 continue;
4891
4892 /* If min rate is zero - set it to 1 */
4893 if (!vn_min_rate)
4894 vn_min_rate = DEF_MIN_RATE;
4895 else
4896 all_zero = 0;
4897
4898 bp->vn_weight_sum += vn_min_rate;
4899 }
4900
4901 /* ... only if all min rates are zeros - disable fairness */
4902 if (all_zero)
4903 bp->vn_weight_sum = 0;
4904}
4905
Eilon Greenstein471de712008-08-13 15:49:35 -07004906static void bnx2x_init_internal_func(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004907{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004908 struct tstorm_eth_function_common_config tstorm_config = {0};
4909 struct stats_indication_flags stats_flags = {0};
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004910 int port = BP_PORT(bp);
4911 int func = BP_FUNC(bp);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004912 int i, j;
4913 u32 offset;
Eilon Greenstein471de712008-08-13 15:49:35 -07004914 u16 max_agg_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004915
4916 if (is_multi(bp)) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004917 tstorm_config.config_flags = MULTI_FLAGS(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004918 tstorm_config.rss_result_mask = MULTI_MASK;
4919 }
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08004920 if (IS_E1HMF(bp))
4921 tstorm_config.config_flags |=
4922 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004923
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004924 tstorm_config.leading_client_id = BP_L_ID(bp);
4925
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004926 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004927 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004928 (*(u32 *)&tstorm_config));
4929
Eliezer Tamirc14423f2008-02-28 11:49:42 -08004930 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004931 bnx2x_set_storm_rx_mode(bp);
4932
Eilon Greensteinde832a52009-02-12 08:36:33 +00004933 for_each_queue(bp, i) {
4934 u8 cl_id = bp->fp[i].cl_id;
4935
4936 /* reset xstorm per client statistics */
4937 offset = BAR_XSTRORM_INTMEM +
4938 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4939 for (j = 0;
4940 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4941 REG_WR(bp, offset + j*4, 0);
4942
4943 /* reset tstorm per client statistics */
4944 offset = BAR_TSTRORM_INTMEM +
4945 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4946 for (j = 0;
4947 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4948 REG_WR(bp, offset + j*4, 0);
4949
4950 /* reset ustorm per client statistics */
4951 offset = BAR_USTRORM_INTMEM +
4952 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4953 for (j = 0;
4954 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4955 REG_WR(bp, offset + j*4, 0);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004956 }
4957
4958 /* Init statistics related context */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004959 stats_flags.collect_eth = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004960
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004961 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004962 ((u32 *)&stats_flags)[0]);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004963 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004964 ((u32 *)&stats_flags)[1]);
4965
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004966 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004967 ((u32 *)&stats_flags)[0]);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004968 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004969 ((u32 *)&stats_flags)[1]);
4970
Eilon Greensteinde832a52009-02-12 08:36:33 +00004971 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4972 ((u32 *)&stats_flags)[0]);
4973 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4974 ((u32 *)&stats_flags)[1]);
4975
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004976 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004977 ((u32 *)&stats_flags)[0]);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004978 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004979 ((u32 *)&stats_flags)[1]);
4980
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004981 REG_WR(bp, BAR_XSTRORM_INTMEM +
4982 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4983 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4984 REG_WR(bp, BAR_XSTRORM_INTMEM +
4985 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4986 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4987
4988 REG_WR(bp, BAR_TSTRORM_INTMEM +
4989 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4990 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4991 REG_WR(bp, BAR_TSTRORM_INTMEM +
4992 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4993 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004994
Eilon Greensteinde832a52009-02-12 08:36:33 +00004995 REG_WR(bp, BAR_USTRORM_INTMEM +
4996 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4997 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4998 REG_WR(bp, BAR_USTRORM_INTMEM +
4999 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5000 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5001
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005002 if (CHIP_IS_E1H(bp)) {
5003 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5004 IS_E1HMF(bp));
5005 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5006 IS_E1HMF(bp));
5007 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5008 IS_E1HMF(bp));
5009 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5010 IS_E1HMF(bp));
5011
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005012 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5013 bp->e1hov);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005014 }
5015
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08005016 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5017 max_agg_size =
5018 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5019 SGE_PAGE_SIZE * PAGES_PER_SGE),
5020 (u32)0xffff);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005021 for_each_rx_queue(bp, i) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005022 struct bnx2x_fastpath *fp = &bp->fp[i];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005023
5024 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein0626b892009-02-12 08:38:14 +00005025 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005026 U64_LO(fp->rx_comp_mapping));
5027 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein0626b892009-02-12 08:38:14 +00005028 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005029 U64_HI(fp->rx_comp_mapping));
5030
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005031 REG_WR16(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein0626b892009-02-12 08:38:14 +00005032 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005033 max_agg_size);
5034 }
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00005035
Eilon Greenstein1c063282009-02-12 08:36:43 +00005036 /* dropless flow control */
5037 if (CHIP_IS_E1H(bp)) {
5038 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5039
5040 rx_pause.bd_thr_low = 250;
5041 rx_pause.cqe_thr_low = 250;
5042 rx_pause.cos = 1;
5043 rx_pause.sge_thr_low = 0;
5044 rx_pause.bd_thr_high = 350;
5045 rx_pause.cqe_thr_high = 350;
5046 rx_pause.sge_thr_high = 0;
5047
5048 for_each_rx_queue(bp, i) {
5049 struct bnx2x_fastpath *fp = &bp->fp[i];
5050
5051 if (!fp->disable_tpa) {
5052 rx_pause.sge_thr_low = 150;
5053 rx_pause.sge_thr_high = 250;
5054 }
5055
5056
5057 offset = BAR_USTRORM_INTMEM +
5058 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5059 fp->cl_id);
5060 for (j = 0;
5061 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5062 j++)
5063 REG_WR(bp, offset + j*4,
5064 ((u32 *)&rx_pause)[j]);
5065 }
5066 }
5067
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00005068 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5069
5070 /* Init rate shaping and fairness contexts */
5071 if (IS_E1HMF(bp)) {
5072 int vn;
5073
5074 /* During init there is no active link
5075 Until link is up, set link rate to 10Gbps */
5076 bp->link_vars.line_speed = SPEED_10000;
5077 bnx2x_init_port_minmax(bp);
5078
5079 bnx2x_calc_vn_weight_sum(bp);
5080
5081 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5082 bnx2x_init_vn_minmax(bp, 2*vn + port);
5083
5084 /* Enable rate shaping and fairness */
5085 bp->cmng.flags.cmng_enables =
5086 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5087 if (bp->vn_weight_sum)
5088 bp->cmng.flags.cmng_enables |=
5089 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5090 else
5091 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5092 " fairness will be disabled\n");
5093 } else {
5094 /* rate shaping and fairness are disabled */
5095 DP(NETIF_MSG_IFUP,
5096 "single function mode minmax will be disabled\n");
5097 }
5098
5099
5100 /* Store it to internal memory */
5101 if (bp->port.pmf)
5102 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5103 REG_WR(bp, BAR_XSTRORM_INTMEM +
5104 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5105 ((u32 *)(&bp->cmng))[i]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005106}
5107
Eilon Greenstein471de712008-08-13 15:49:35 -07005108static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5109{
5110 switch (load_code) {
5111 case FW_MSG_CODE_DRV_LOAD_COMMON:
5112 bnx2x_init_internal_common(bp);
5113 /* no break */
5114
5115 case FW_MSG_CODE_DRV_LOAD_PORT:
5116 bnx2x_init_internal_port(bp);
5117 /* no break */
5118
5119 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5120 bnx2x_init_internal_func(bp);
5121 break;
5122
5123 default:
5124 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5125 break;
5126 }
5127}
5128
/* Top-level NIC init: set up every fastpath queue and its status block,
 * then the default status block, rings, context and internal (storm)
 * memories, and finally enable interrupts.  The ordering here matters:
 * interrupts are only enabled after all data structures are ready and
 * a memory barrier has flushed the writes.
 */
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		/* client/status-block ids are offset by this function's
		   base L_ID; sb_id mirrors cl_id */
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
		   bp, fp->status_blk, i, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	/* load_code selects which internal-memory init stages run */
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);
}
5170
5171/* end of nic init */
5172
5173/*
5174 * gzip service functions
5175 */
5176
5177static int bnx2x_gunzip_init(struct bnx2x *bp)
5178{
5179 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5180 &bp->gunzip_mapping);
5181 if (bp->gunzip_buf == NULL)
5182 goto gunzip_nomem1;
5183
5184 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5185 if (bp->strm == NULL)
5186 goto gunzip_nomem2;
5187
5188 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5189 GFP_KERNEL);
5190 if (bp->strm->workspace == NULL)
5191 goto gunzip_nomem3;
5192
5193 return 0;
5194
5195gunzip_nomem3:
5196 kfree(bp->strm);
5197 bp->strm = NULL;
5198
5199gunzip_nomem2:
5200 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5201 bp->gunzip_mapping);
5202 bp->gunzip_buf = NULL;
5203
5204gunzip_nomem1:
5205 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005206 " un-compression\n", bp->dev->name);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005207 return -ENOMEM;
5208}
5209
5210static void bnx2x_gunzip_end(struct bnx2x *bp)
5211{
5212 kfree(bp->strm->workspace);
5213
5214 kfree(bp->strm);
5215 bp->strm = NULL;
5216
5217 if (bp->gunzip_buf) {
5218 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5219 bp->gunzip_mapping);
5220 bp->gunzip_buf = NULL;
5221 }
5222}
5223
5224static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5225{
5226 int n, rc;
5227
5228 /* check gzip header */
5229 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5230 return -EINVAL;
5231
5232 n = 10;
5233
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005234#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005235
5236 if (zbuf[3] & FNAME)
5237 while ((zbuf[n++] != 0) && (n < len));
5238
5239 bp->strm->next_in = zbuf + n;
5240 bp->strm->avail_in = len - n;
5241 bp->strm->next_out = bp->gunzip_buf;
5242 bp->strm->avail_out = FW_BUF_SIZE;
5243
5244 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5245 if (rc != Z_OK)
5246 return rc;
5247
5248 rc = zlib_inflate(bp->strm, Z_FINISH);
5249 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5250 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5251 bp->dev->name, bp->strm->msg);
5252
5253 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5254 if (bp->gunzip_outlen & 0x3)
5255 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5256 " gunzip_outlen (%d) not aligned\n",
5257 bp->dev->name, bp->gunzip_outlen);
5258 bp->gunzip_outlen >>= 2;
5259
5260 zlib_inflateEnd(bp->strm);
5261
5262 if (rc == Z_STREAM_END)
5263 return 0;
5264
5265 return rc;
5266}
5267
5268/* nic load/unload */
5269
5270/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005271 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005272 */
5273
5274/* send a NIG loopback debug packet */
/* Inject one small debug packet into the NIG loopback path by writing two
 * 3-dword beats (start-of-packet, then end-of-packet) to the NIG debug
 * packet register.  Used by bnx2x_int_mem_test() to exercise internal
 * memories that are not directly readable.
 */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
5291
5292/* some of the internal memories
5293 * are not directly readable from the driver
5294 * to test them we send debug packets
5295 */
/* Self-test of internal memories that are not directly readable: debug
 * packets are looped through the NIG/BRB/PRS path and the packet counters
 * are checked at each stage.  Returns 0 on success or a distinct negative
 * value (-1..-4) identifying the stage that failed.  The blocks touched
 * here are reset and re-initialized before returning.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* FPGA/emulation platforms run much slower than silicon, so
	   stretch all the poll timeouts accordingly */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
5443
/* Unmask the attention interrupts of the HW blocks by clearing their
 * interrupt-mask registers (0 = nothing masked).  The commented-out SEM
 * and MISC masks are deliberately left masked.  The PXP2 and PBF masks
 * keep specific bits masked (values below) rather than enabling all.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	/* FPGA vs. ASIC keep different PXP2 bits masked - NOTE(review):
	   bit meanings come from the PXP2 block spec, not visible here */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
5482
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005483
/* Put the chip's common (non per-port) blocks into reset by clearing
 * bits in the two MISC reset registers.  The masks select which blocks
 * are reset - NOTE(review): exact bit-to-block mapping comes from the
 * MISC_REGISTERS_RESET_REG_* definitions, not visible here.
 */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
5491
/* One-time common (chip-wide) HW init, performed only by the function
 * that got FW_MSG_CODE_DRV_LOAD_COMMON from the MCP: resets the chip,
 * brings every common block out of reset in dependency order, zeroes the
 * storm internal memories, runs the internal-memory self test on first
 * power-up, configures the external PHY specifics and unmasks block
 * attentions.  Returns 0 on success or -EBUSY if a block fails to
 * complete its init (PXP2/CFC polls or the self test).
 */
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	/* pulse the LCPLL control register */
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	/* byte-swap the PXP2 request/read paths on big-endian hosts */
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do it's magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	/* zero the storm internal memories; on E1H each is filled in
	   two halves */
	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	/* program the RSS hash keys (placeholder constant) */
	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client crdit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	/* external-PHY specific setup */
	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		bp->port.need_hw_lock = 1;
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005778
/*
 * bnx2x_init_port - per-port hardware initialization
 *
 * Runs once per physical port when the MCP load code grants port-level
 * init (see bnx2x_init_hw).  Programs the port-scoped HW blocks in a
 * fixed order: NIG interrupt mask, CM/BRB1 blocks, BRB pause thresholds,
 * SEM blocks, PBF credits, HC/AEU attention routing, NIG mode and
 * finally external-PHY attention wiring.  The statement order follows
 * the HW init spec — do not reorder.
 *
 * Returns 0 (no failure paths in this sequence).
 */
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	/* mask this port's NIG interrupts while the blocks are programmed */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	/* NOTE(review): this #ifdef region references 'i', 'wb_write' and
	   'func' which are not declared in this function — it does not
	   compile when BCM_ISCSI is defined; leftover placeholder code */
	/* Port0 1
	 * Port1 385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0 2
	 * Port1 386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0 3
	 * Port1 387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */
	bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
			 (port ? XCM_PORT1_END : XCM_PORT0_END));

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */

	bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
			 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		/* BRB pause thresholds depend on MF mode, MTU and the
		   one-port-active flag; values are in 256-byte units */
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);


	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			 port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			 port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			 port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			 port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			 port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes: pulse PBF init to latch the new credit values */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			 port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
			 MISC_AEU_PORT0_START,
			 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *            bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			 port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* support pause requests from USDM, TSDM and BRB */
		REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	/* route external-PHY attention signals into the AEU groups,
	   depending on which PHY the board carries */
	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}
5996
/* ILT (Internal Lookup Table) layout: 768 lines split between the two
   functions, each function's lines starting at a fixed base */
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
/* pack first/last ILT line indices into one PSWRQ register value */
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

/* no ILT lines reserved for CNIC in this build */
#define CNIC_ILT_LINES		0
6010
6011static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6012{
6013 int reg;
6014
6015 if (CHIP_IS_E1H(bp))
6016 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6017 else /* E1 */
6018 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6019
6020 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6021}
6022
/*
 * bnx2x_init_func - per-PCI-function hardware initialization
 *
 * Runs for every function on every load.  Enables the MSI reconfigure
 * attention, maps this function's slowpath context into its ILT range,
 * initializes the per-function CM and HC blocks (E1H only for CM), and
 * clears the PCIE error status registers.
 *
 * Returns 0 (no failure paths).
 */
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	/* first ILT line owned by this function */
	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));


	if (CHIP_IS_E1H(bp)) {
		/* init the per-function CM blocks and enable the LLH
		   with this function's outer VLAN id */
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
6073
/*
 * bnx2x_init_hw - top-level HW init dispatcher
 *
 * @load_code: the MCP's answer to our load request; it tells us whether
 *	we are the first driver on the chip (COMMON), first on the port
 *	(PORT) or just loading a function (FUNCTION).
 *
 * The switch intentionally falls through: a COMMON load also performs
 * the PORT and FUNCTION stages, a PORT load also performs FUNCTION.
 * DMAE becomes usable only after the common stage, hence dmae_ready is
 * raised before the port/function stages.
 *
 * Returns 0 on success or the error from a failed init stage.
 */
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		/* latch the bootcode's pulse sequence and the address of
		   the per-function statistics block from shared memory */
		bp->fw_drv_pulse_wr_seq =
			       (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	} else
		bp->func_stx = 0;

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
6133
/* send the MCP a request, block until there is a reply */
/*
 * bnx2x_fw_command - synchronous mailbox exchange with the bootcode
 *
 * Writes @command tagged with an incrementing sequence number into the
 * driver mailbox, then polls the firmware mailbox (sleeping 10ms per
 * iteration, 100ms on emulation) until the firmware echoes the same
 * sequence number or ~2 seconds elapse.
 *
 * Returns the firmware response code (FW_MSG_CODE_* bits) on success,
 * or 0 if the firmware never answered.
 */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do it's magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 second (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
6171
6172static void bnx2x_free_mem(struct bnx2x *bp)
6173{
6174
6175#define BNX2X_PCI_FREE(x, y, size) \
6176 do { \
6177 if (x) { \
6178 pci_free_consistent(bp->pdev, size, x, y); \
6179 x = NULL; \
6180 y = 0; \
6181 } \
6182 } while (0)
6183
6184#define BNX2X_FREE(x) \
6185 do { \
6186 if (x) { \
6187 vfree(x); \
6188 x = NULL; \
6189 } \
6190 } while (0)
6191
6192 int i;
6193
6194 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006195 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006196 for_each_queue(bp, i) {
6197
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006198 /* status blocks */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006199 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6200 bnx2x_fp(bp, i, status_blk_mapping),
6201 sizeof(struct host_status_block) +
6202 sizeof(struct eth_tx_db_data));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006203 }
6204 /* Rx */
6205 for_each_rx_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006206
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006207 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006208 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6209 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6210 bnx2x_fp(bp, i, rx_desc_mapping),
6211 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6212
6213 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6214 bnx2x_fp(bp, i, rx_comp_mapping),
6215 sizeof(struct eth_fast_path_rx_cqe) *
6216 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006217
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006218 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07006219 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006220 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6221 bnx2x_fp(bp, i, rx_sge_mapping),
6222 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6223 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006224 /* Tx */
6225 for_each_tx_queue(bp, i) {
6226
6227 /* fastpath tx rings: tx_buf tx_desc */
6228 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6229 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6230 bnx2x_fp(bp, i, tx_desc_mapping),
6231 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6232 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006233 /* end of fastpath */
6234
6235 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006236 sizeof(struct host_def_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006237
6238 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006239 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006240
6241#ifdef BCM_ISCSI
6242 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6243 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6244 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6245 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6246#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006247 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006248
6249#undef BNX2X_PCI_FREE
6250#undef BNX2X_KFREE
6251}
6252
/*
 * bnx2x_alloc_mem - allocate all driver memory
 *
 * Allocates, per queue: the status block + tx producers area (coherent
 * DMA), the rx buffer/desc/completion rings, the SGE ring, and the tx
 * rings; plus the default status block, slowpath block and slowpath
 * (SPQ) ring.  On any failure the local macros jump to alloc_mem_err,
 * which releases whatever was already allocated via bnx2x_free_mem.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.
 */
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

/* coherent DMA allocation, zeroed; jumps to alloc_mem_err on failure */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* vmalloc allocation, zeroed; jumps to alloc_mem_err on failure */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* the tx producers live right after the status block,
		   inside the same coherent allocation */
		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		* (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6372
6373static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6374{
6375 int i;
6376
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006377 for_each_tx_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006378 struct bnx2x_fastpath *fp = &bp->fp[i];
6379
6380 u16 bd_cons = fp->tx_bd_cons;
6381 u16 sw_prod = fp->tx_pkt_prod;
6382 u16 sw_cons = fp->tx_pkt_cons;
6383
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006384 while (sw_cons != sw_prod) {
6385 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6386 sw_cons++;
6387 }
6388 }
6389}
6390
6391static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6392{
6393 int i, j;
6394
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006395 for_each_rx_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006396 struct bnx2x_fastpath *fp = &bp->fp[j];
6397
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006398 for (i = 0; i < NUM_RX_BD; i++) {
6399 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6400 struct sk_buff *skb = rx_buf->skb;
6401
6402 if (skb == NULL)
6403 continue;
6404
6405 pci_unmap_single(bp->pdev,
6406 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07006407 bp->rx_buf_size,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006408 PCI_DMA_FROMDEVICE);
6409
6410 rx_buf->skb = NULL;
6411 dev_kfree_skb(skb);
6412 }
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006413 if (!fp->disable_tpa)
Eilon Greenstein32626232008-08-13 15:51:07 -07006414 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6415 ETH_MAX_AGGREGATION_QUEUES_E1 :
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006416 ETH_MAX_AGGREGATION_QUEUES_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006417 }
6418}
6419
/* release all tx and rx buffers still held by the driver */
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
6425
6426static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6427{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006428 int i, offset = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006429
6430 free_irq(bp->msix_table[0].vector, bp->dev);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006431 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006432 bp->msix_table[0].vector);
6433
6434 for_each_queue(bp, i) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006435 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006436 "state %x\n", i, bp->msix_table[i + offset].vector,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006437 bnx2x_fp(bp, i, state));
6438
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006439 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006440 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006441}
6442
6443static void bnx2x_free_irq(struct bnx2x *bp)
6444{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006445 if (bp->flags & USING_MSIX_FLAG) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006446 bnx2x_free_msix_irqs(bp);
6447 pci_disable_msix(bp->pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006448 bp->flags &= ~USING_MSIX_FLAG;
6449
Eilon Greenstein8badd272009-02-12 08:36:15 +00006450 } else if (bp->flags & USING_MSI_FLAG) {
6451 free_irq(bp->pdev->irq, bp->dev);
6452 pci_disable_msi(bp->pdev);
6453 bp->flags &= ~USING_MSI_FLAG;
6454
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006455 } else
6456 free_irq(bp->pdev->irq, bp->dev);
6457}
6458
6459static int bnx2x_enable_msix(struct bnx2x *bp)
6460{
Eilon Greenstein8badd272009-02-12 08:36:15 +00006461 int i, rc, offset = 1;
6462 int igu_vec = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006463
Eilon Greenstein8badd272009-02-12 08:36:15 +00006464 bp->msix_table[0].entry = igu_vec;
6465 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006466
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006467 for_each_queue(bp, i) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00006468 igu_vec = BP_L_ID(bp) + offset + i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006469 bp->msix_table[i + offset].entry = igu_vec;
6470 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6471 "(fastpath #%u)\n", i + offset, igu_vec, i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006472 }
6473
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006474 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006475 BNX2X_NUM_QUEUES(bp) + offset);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006476 if (rc) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00006477 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6478 return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006479 }
Eilon Greenstein8badd272009-02-12 08:36:15 +00006480
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006481 bp->flags |= USING_MSIX_FLAG;
6482
6483 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006484}
6485
/*
 * bnx2x_req_msix_irqs - register handlers for all MSI-X vectors
 *
 * Requests the slowpath vector first, then one vector per fastpath
 * queue (named "<dev>.fp<N>").  If a fastpath request fails, every
 * vector registered so far is released via bnx2x_free_msix_irqs.
 * NOTE: the slowpath vector is not released when the very first
 * fastpath request fails only through the same helper — the helper
 * frees it too, so cleanup is complete.
 *
 * Returns 0 on success, -EBUSY on any request_irq failure.
 */
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	/* report the assigned vectors */
	i = BNX2X_NUM_QUEUES(bp);
	if (is_multi(bp))
		printk(KERN_INFO PFX
		       "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset].vector,
		       bp->msix_table[offset + i - 1].vector);
	else
		printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset + i - 1].vector);

	return 0;
}
6526
Eilon Greenstein8badd272009-02-12 08:36:15 +00006527static int bnx2x_enable_msi(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006528{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006529 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006530
Eilon Greenstein8badd272009-02-12 08:36:15 +00006531 rc = pci_enable_msi(bp->pdev);
6532 if (rc) {
6533 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6534 return -1;
6535 }
6536 bp->flags |= USING_MSI_FLAG;
6537
6538 return 0;
6539}
6540
6541static int bnx2x_req_irq(struct bnx2x *bp)
6542{
6543 unsigned long flags;
6544 int rc;
6545
6546 if (bp->flags & USING_MSI_FLAG)
6547 flags = 0;
6548 else
6549 flags = IRQF_SHARED;
6550
6551 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006552 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006553 if (!rc)
6554 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6555
6556 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006557}
6558
Yitchak Gertner65abd742008-08-25 15:26:24 -07006559static void bnx2x_napi_enable(struct bnx2x *bp)
6560{
6561 int i;
6562
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006563 for_each_rx_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07006564 napi_enable(&bnx2x_fp(bp, i, napi));
6565}
6566
6567static void bnx2x_napi_disable(struct bnx2x *bp)
6568{
6569 int i;
6570
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006571 for_each_rx_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07006572 napi_disable(&bnx2x_fp(bp, i, napi));
6573}
6574
/*
 * bnx2x_netif_start - re-enable traffic processing
 *
 * intr_sem is a nesting counter for bnx2x_netif_stop/start pairs; only
 * the outermost start (counter reaching zero) actually re-enables NAPI,
 * HW interrupts and the tx queues, and only while the device is up.
 */
static void bnx2x_netif_start(struct bnx2x *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			/* wake tx queues only in the fully-open state */
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}
6586
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07006587static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
Yitchak Gertner65abd742008-08-25 15:26:24 -07006588{
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07006589 bnx2x_int_disable_sync(bp, disable_hw);
Eilon Greensteine94d8af2009-01-22 03:37:36 +00006590 bnx2x_napi_disable(bp);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006591 if (netif_running(bp->dev)) {
Yitchak Gertner65abd742008-08-25 15:26:24 -07006592 netif_tx_disable(bp->dev);
6593 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6594 }
6595}
6596
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006597/*
6598 * Init service functions
6599 */
6600
/*
 * Program the E1 CAM through the SET_MAC ramrod: config_table[0]
 * carries the device's unicast MAC, config_table[1] the broadcast
 * address.  With @set == 0 both entries are invalidated instead.
 * The ramrod completes asynchronously via the slowpath event path.
 */
static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC: dev_addr is byte-swapped 16 bits at a time for HW */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast entry: all-ones address */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	/* hand the DMA-mapped command to the firmware */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
6653
/*
 * Program the per-function E1H CAM entry with the device's unicast MAC
 * via the SET_MAC ramrod; with @set == 0 the entry is marked for
 * invalidation instead.  Setting is refused unless the device has
 * reached BNX2X_STATE_OPEN.
 */
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC: dev_addr is byte-swapped 16 bits at a time for HW */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	/* hand the DMA-mapped command to the firmware */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
6699
/*
 * Wait for a ramrod completion to move *state_p to @state, polling for
 * up to ~5000 ms in 1 ms steps.  When @poll is set the Rx rings are
 * serviced by hand (for contexts where the interrupt path is not
 * delivering completions); replies for a non-zero @idx arrive on that
 * non-default queue.
 *
 * Returns 0 once the state is reached, -EBUSY on timeout.
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
6741
6742static int bnx2x_setup_leading(struct bnx2x *bp)
6743{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006744 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006745
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006746 /* reset IGU state */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006747 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006748
6749 /* SETUP ramrod */
6750 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6751
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006752 /* Wait for completion */
6753 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006754
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006755 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006756}
6757
6758static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6759{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006760 struct bnx2x_fastpath *fp = &bp->fp[index];
6761
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006762 /* reset IGU state */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006763 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006764
Eliezer Tamir228241e2008-02-28 11:56:57 -08006765 /* SETUP ramrod */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006766 fp->state = BNX2X_FP_STATE_OPENING;
6767 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6768 fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006769
6770 /* Wait for completion */
6771 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006772 &(fp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006773}
6774
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006775static int bnx2x_poll(struct napi_struct *napi, int budget);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006776
/*
 * Select the interrupt mode and size the Rx/Tx queue counts from the
 * "int_mode" module parameter.  INTx/MSI force a single queue pair;
 * MSI-X (the default) sizes the queues by the online CPU count when
 * RSS is in regular mode, and falls back to one queue (to be served by
 * MSI or legacy INTx) if MSI-X cannot be enabled.
 */
static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   " number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	/* expose the chosen Tx queue count to the net stack */
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}
6821
6822static void bnx2x_set_rx_mode(struct net_device *dev);
6823
/* must be called with rtnl_lock */
/*
 * Bring the NIC up.  @load_mode (LOAD_NORMAL/LOAD_OPEN/LOAD_DIAG)
 * selects how the fast path is started at the end.  Sequence:
 * memory allocation, NAPI/IRQ setup, MCP LOAD_REQ handshake (or a
 * driver-local load count when there is no MCP), HW init, ramrod-based
 * client setup, MAC programming and finally queue/rx-mode start.
 * Errors unwind through the load_error* labels in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* decide IRQ mode and queue counts before sizing memory */
	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	/* propagate the global TPA flag to every Rx queue */
	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	/* clear per-queue poll debug counters */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	/* request IRQs according to the mode chosen above */
	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		/* no MCP: emulate its bookkeeping with driver-global
		 * load counters to derive the same load_code */
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	/* first function on a port becomes the port-management function */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	/* leading client first; moves bp->state to OPEN on success */
	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!! mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	/* then every additional queue/client */
	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);


	return 0;

/* error unwinding: each label releases what was acquired before it */
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	/* TBD we really need to reset the chip
	   if we want to recover from this */
	return rc;
}
7035
/*
 * Tear down non-default client @index: post HALT and wait for the
 * fastpath to reach HALTED, then post CFC_DEL and wait for CLOSED.
 * Both waits use poll mode (poll=1), servicing the rings by hand.
 *
 * Returns 0 on success, -EBUSY if either completion times out.
 */
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}
7059
/*
 * Tear down the leading (default) client: HALT it, then post
 * PORT_DELETE and detect its completion by watching the default status
 * block's slowpath producer advance (no per-queue state to wait on).
 * A PORT_DELETE timeout is tolerated (rc = -EBUSY) since the chip is
 * about to be reset anyway.
 */
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	/* snapshot the producer so we can see it move */
	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
7109
/*
 * Reset the per-function HW state: mask the function's IGU edge
 * registers and zero its slice of the ILT (address translation table).
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
7125
/*
 * Reset the per-port HW state: mask the port's NIG interrupts, block
 * packet reception into the BRB, mask the AEU attentions, and verify
 * the BRB has drained (only logged if it has not).
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* give in-flight packets time to drain before checking */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
7151
/*
 * Dispatch the HW reset according to the unload @reset_code returned
 * by the MCP: COMMON resets port + function + common blocks, PORT
 * resets port + function, FUNCTION resets only the function.
 */
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
7178
Eilon Greenstein33471622008-08-13 15:59:08 -07007179/* must be called with rtnl_lock */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007180static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007181{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007182 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007183 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007184 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007185
7186 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7187
Eliezer Tamir228241e2008-02-28 11:56:57 -08007188 bp->rx_mode = BNX2X_RX_MODE_NONE;
7189 bnx2x_set_storm_rx_mode(bp);
7190
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007191 bnx2x_netif_stop(bp, 1);
Eilon Greensteine94d8af2009-01-22 03:37:36 +00007192
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007193 del_timer_sync(&bp->timer);
7194 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7195 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07007196 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007197
Eilon Greenstein70b99862009-01-14 06:43:48 +00007198 /* Release IRQs */
7199 bnx2x_free_irq(bp);
7200
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007201 /* Wait until tx fastpath tasks complete */
7202 for_each_tx_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007203 struct bnx2x_fastpath *fp = &bp->fp[i];
7204
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007205 cnt = 1000;
Eilon Greenstein3e5b5102009-02-12 08:37:25 +00007206 smp_mb();
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08007207 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007208
Yitchak Gertner65abd742008-08-25 15:26:24 -07007209 bnx2x_tx_int(fp, 1000);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007210 if (!cnt) {
7211 BNX2X_ERR("timeout waiting for queue[%d]\n",
7212 i);
7213#ifdef BNX2X_STOP_ON_ERROR
7214 bnx2x_panic();
7215 return -EBUSY;
7216#else
7217 break;
7218#endif
7219 }
7220 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007221 msleep(1);
Eilon Greenstein3e5b5102009-02-12 08:37:25 +00007222 smp_mb();
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007223 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08007224 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007225 /* Give HW time to discard old tx messages */
7226 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007227
Yitchak Gertner65abd742008-08-25 15:26:24 -07007228 if (CHIP_IS_E1(bp)) {
7229 struct mac_configuration_cmd *config =
7230 bnx2x_sp(bp, mcast_config);
7231
7232 bnx2x_set_mac_addr_e1(bp, 0);
7233
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08007234 for (i = 0; i < config->hdr.length; i++)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007235 CAM_INVALIDATE(config->config_table[i]);
7236
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08007237 config->hdr.length = i;
Yitchak Gertner65abd742008-08-25 15:26:24 -07007238 if (CHIP_REV_IS_SLOW(bp))
7239 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7240 else
7241 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
Eilon Greenstein0626b892009-02-12 08:38:14 +00007242 config->hdr.client_id = bp->fp->cl_id;
Yitchak Gertner65abd742008-08-25 15:26:24 -07007243 config->hdr.reserved1 = 0;
7244
7245 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7246 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7247 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7248
7249 } else { /* E1H */
7250 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7251
7252 bnx2x_set_mac_addr_e1h(bp, 0);
7253
7254 for (i = 0; i < MC_HASH_SIZE; i++)
7255 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7256 }
7257
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007258 if (unload_mode == UNLOAD_NORMAL)
7259 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007260
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007261 else if (bp->flags & NO_WOL_FLAG) {
7262 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7263 if (CHIP_IS_E1H(bp))
7264 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7265
7266 } else if (bp->wol) {
7267 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007268 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007269 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007270 /* The mac address is written to entries 1-4 to
7271 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007272 u8 entry = (BP_E1HVN(bp) + 1)*8;
7273
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007274 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007275 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007276
7277 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7278 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007279 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007280
7281 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007282
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007283 } else
7284 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7285
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007286 /* Close multi and leading connections
7287 Completions for ramrods are collected in a synchronous way */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007288 for_each_nondefault_queue(bp, i)
7289 if (bnx2x_stop_multi(bp, i))
Eliezer Tamir228241e2008-02-28 11:56:57 -08007290 goto unload_error;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007291
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007292 rc = bnx2x_stop_leading(bp);
7293 if (rc) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007294 BNX2X_ERR("Stop leading failed!\n");
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007295#ifdef BNX2X_STOP_ON_ERROR
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007296 return -EBUSY;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007297#else
7298 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007299#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08007300 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007301
Eliezer Tamir228241e2008-02-28 11:56:57 -08007302unload_error:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007303 if (!BP_NOMCP(bp))
Eliezer Tamir228241e2008-02-28 11:56:57 -08007304 reset_code = bnx2x_fw_command(bp, reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007305 else {
7306 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
7307 load_count[0], load_count[1], load_count[2]);
7308 load_count[0]--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007309 load_count[1 + port]--;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007310 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
7311 load_count[0], load_count[1], load_count[2]);
7312 if (load_count[0] == 0)
7313 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007314 else if (load_count[1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007315 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7316 else
7317 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7318 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007319
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007320 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7321 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7322 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007323
7324 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08007325 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007326
7327 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007328 if (!BP_NOMCP(bp))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007329 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
Eilon Greenstein9a035442008-11-03 16:45:55 -08007330 bp->port.pmf = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007331
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07007332 /* Free SKBs, SGEs, TPA pool and driver internals */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007333 bnx2x_free_skbs(bp);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007334 for_each_rx_queue(bp, i)
Eilon Greenstein3196a882008-08-13 15:58:49 -07007335 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007336 for_each_rx_queue(bp, i)
Eilon Greenstein7cde1c82009-01-22 06:01:25 +00007337 netif_napi_del(&bnx2x_fp(bp, i, napi));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007338 bnx2x_free_mem(bp);
7339
7340 bp->state = BNX2X_STATE_CLOSED;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007341
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007342 netif_carrier_off(bp->dev);
7343
7344 return 0;
7345}
7346
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007347static void bnx2x_reset_task(struct work_struct *work)
7348{
7349 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7350
7351#ifdef BNX2X_STOP_ON_ERROR
7352 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7353 " so reset not done to allow debug dump,\n"
7354 KERN_ERR " you will need to reboot when done\n");
7355 return;
7356#endif
7357
7358 rtnl_lock();
7359
7360 if (!netif_running(bp->dev))
7361 goto reset_task_exit;
7362
7363 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7364 bnx2x_nic_load(bp, LOAD_NORMAL);
7365
7366reset_task_exit:
7367 rtnl_unlock();
7368}
7369
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007370/* end of nic load/unload */
7371
7372/* ethtool_ops */
7373
7374/*
7375 * Init service functions
7376 */
7377
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007378static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7379{
7380 switch (func) {
7381 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7382 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7383 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7384 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7385 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7386 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7387 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7388 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7389 default:
7390 BNX2X_ERR("Unsupported function index: %d\n", func);
7391 return (u32)(-1);
7392 }
7393}
7394
/* Disable HW interrupts on an E1H chip while it is still configured by a
 * pre-boot (UNDI) driver: temporarily program the PGL pretend register to
 * function 0 so the E1-style disable in bnx2x_int_disable() reaches the
 * right registers, then restore the original function.  Each pretend write
 * is read back to make sure it reached the chip; a mismatch is fatal (BUG).
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
7427
/* Disable chip interrupts during UNDI unload: E1 can disable directly,
 * E1H needs the per-function pretend-register sequence.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp)) {
		bnx2x_int_disable(bp);
		return;
	}

	bnx2x_undi_int_disable_e1h(bp, func);
}
7435
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007436static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007437{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007438 u32 val;
7439
7440 /* Check if there is any driver already loaded */
7441 val = REG_RD(bp, MISC_REG_UNPREPARED);
7442 if (val == 0x1) {
7443 /* Check if it is the UNDI driver
7444 * UNDI driver initializes CID offset for normal bell to 0x7
7445 */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07007446 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007447 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7448 if (val == 0x7) {
7449 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007450 /* save our func */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007451 int func = BP_FUNC(bp);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007452 u32 swap_en;
7453 u32 swap_val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007454
Eilon Greensteinb4661732009-01-14 06:43:56 +00007455 /* clear the UNDI indication */
7456 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7457
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007458 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7459
7460 /* try unload UNDI on port 0 */
7461 bp->func = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007462 bp->fw_seq =
7463 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7464 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007465 reset_code = bnx2x_fw_command(bp, reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007466
7467 /* if UNDI is loaded on the other port */
7468 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7469
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007470 /* send "DONE" for previous unload */
7471 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7472
7473 /* unload UNDI on port 1 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007474 bp->func = 1;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007475 bp->fw_seq =
7476 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7477 DRV_MSG_SEQ_NUMBER_MASK);
7478 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007479
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007480 bnx2x_fw_command(bp, reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007481 }
7482
Eilon Greensteinb4661732009-01-14 06:43:56 +00007483 /* now it's safe to release the lock */
7484 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7485
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007486 bnx2x_undi_int_disable(bp, func);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007487
7488 /* close input traffic and wait for it */
7489 /* Do not rcv packets to BRB */
7490 REG_WR(bp,
7491 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7492 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7493 /* Do not direct rcv packets that are not for MCP to
7494 * the BRB */
7495 REG_WR(bp,
7496 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7497 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7498 /* clear AEU */
7499 REG_WR(bp,
7500 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7501 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7502 msleep(10);
7503
7504 /* save NIG port swap info */
7505 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7506 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007507 /* reset device */
7508 REG_WR(bp,
7509 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007510 0xd3ffffff);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007511 REG_WR(bp,
7512 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7513 0x1403);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007514 /* take the NIG out of reset and restore swap values */
7515 REG_WR(bp,
7516 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7517 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7518 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7519 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7520
7521 /* send unload done to the MCP */
7522 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7523
7524 /* restore our func and fw_seq */
7525 bp->func = func;
7526 bp->fw_seq =
7527 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7528 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greensteinb4661732009-01-14 06:43:56 +00007529
7530 } else
7531 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007532 }
7533}
7534
/* Read chip-wide (port-independent) HW info at probe time: chip id,
 * single/dual-port strap, flash size, shared-memory base, LED mode,
 * feature flags, bootcode version, WoL capability and part number.
 * If shared memory is absent/invalid the MCP is flagged as inactive
 * (NO_MCP_FLAG) and the shmem-dependent reads are skipped.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* detect single-port devices from the chip id / strap register */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* a shmem base outside [0xA0000, 0xC0000) means no active MCP */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		/* WoL support depends on PME-from-D3cold capability */
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

	/* part number is stored as four consecutive 32-bit shmem words */
	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
7628
/* Build bp->port.supported (ethtool SUPPORTED_* mask) and read the PHY
 * address, based on the NVRAM switch configuration (1G SerDes vs 10G XGXS)
 * and the external PHY type; then trim the mask against the NVRAM
 * speed_cap_mask.  On an invalid NVRAM configuration, logs an error and
 * returns leaving bp->port.supported unchanged.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* SerDes PHY address comes from the NIG, per port */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* XGXS PHY address comes from the NIG, per port */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
7853
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007854static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007855{
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007856 bp->link_params.req_duplex = DUPLEX_FULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007857
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007858 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007859 case PORT_FEATURE_LINK_SPEED_AUTO:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007860 if (bp->port.supported & SUPPORTED_Autoneg) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007861 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007862 bp->port.advertising = bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007863 } else {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007864 u32 ext_phy_type =
7865 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7866
7867 if ((ext_phy_type ==
7868 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7869 (ext_phy_type ==
7870 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007871 /* force 10G, no AN */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007872 bp->link_params.req_line_speed = SPEED_10000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007873 bp->port.advertising =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007874 (ADVERTISED_10000baseT_Full |
7875 ADVERTISED_FIBRE);
7876 break;
7877 }
7878 BNX2X_ERR("NVRAM config error. "
7879 "Invalid link_config 0x%x"
7880 " Autoneg not supported\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007881 bp->port.link_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007882 return;
7883 }
7884 break;
7885
7886 case PORT_FEATURE_LINK_SPEED_10M_FULL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007887 if (bp->port.supported & SUPPORTED_10baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007888 bp->link_params.req_line_speed = SPEED_10;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007889 bp->port.advertising = (ADVERTISED_10baseT_Full |
7890 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007891 } else {
7892 BNX2X_ERR("NVRAM config error. "
7893 "Invalid link_config 0x%x"
7894 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007895 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007896 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007897 return;
7898 }
7899 break;
7900
7901 case PORT_FEATURE_LINK_SPEED_10M_HALF:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007902 if (bp->port.supported & SUPPORTED_10baseT_Half) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007903 bp->link_params.req_line_speed = SPEED_10;
7904 bp->link_params.req_duplex = DUPLEX_HALF;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007905 bp->port.advertising = (ADVERTISED_10baseT_Half |
7906 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007907 } else {
7908 BNX2X_ERR("NVRAM config error. "
7909 "Invalid link_config 0x%x"
7910 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007911 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007912 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007913 return;
7914 }
7915 break;
7916
7917 case PORT_FEATURE_LINK_SPEED_100M_FULL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007918 if (bp->port.supported & SUPPORTED_100baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007919 bp->link_params.req_line_speed = SPEED_100;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007920 bp->port.advertising = (ADVERTISED_100baseT_Full |
7921 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007922 } else {
7923 BNX2X_ERR("NVRAM config error. "
7924 "Invalid link_config 0x%x"
7925 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007926 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007927 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007928 return;
7929 }
7930 break;
7931
7932 case PORT_FEATURE_LINK_SPEED_100M_HALF:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007933 if (bp->port.supported & SUPPORTED_100baseT_Half) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007934 bp->link_params.req_line_speed = SPEED_100;
7935 bp->link_params.req_duplex = DUPLEX_HALF;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007936 bp->port.advertising = (ADVERTISED_100baseT_Half |
7937 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007938 } else {
7939 BNX2X_ERR("NVRAM config error. "
7940 "Invalid link_config 0x%x"
7941 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007942 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007943 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007944 return;
7945 }
7946 break;
7947
7948 case PORT_FEATURE_LINK_SPEED_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007949 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007950 bp->link_params.req_line_speed = SPEED_1000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007951 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7952 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007953 } else {
7954 BNX2X_ERR("NVRAM config error. "
7955 "Invalid link_config 0x%x"
7956 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007957 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007958 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007959 return;
7960 }
7961 break;
7962
7963 case PORT_FEATURE_LINK_SPEED_2_5G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007964 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007965 bp->link_params.req_line_speed = SPEED_2500;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007966 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7967 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007968 } else {
7969 BNX2X_ERR("NVRAM config error. "
7970 "Invalid link_config 0x%x"
7971 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007972 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007973 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007974 return;
7975 }
7976 break;
7977
7978 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7979 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7980 case PORT_FEATURE_LINK_SPEED_10G_KR:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007981 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007982 bp->link_params.req_line_speed = SPEED_10000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007983 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7984 ADVERTISED_FIBRE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007985 } else {
7986 BNX2X_ERR("NVRAM config error. "
7987 "Invalid link_config 0x%x"
7988 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007989 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007990 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007991 return;
7992 }
7993 break;
7994
7995 default:
7996 BNX2X_ERR("NVRAM config error. "
7997 "BAD link speed link_config 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007998 bp->port.link_config);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007999 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008000 bp->port.advertising = bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008001 break;
8002 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008003
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008004 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8005 PORT_FEATURE_FLOW_CONTROL_MASK);
David S. Millerc0700f92008-12-16 23:53:20 -08008006 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
Randy Dunlap4ab84d42008-08-07 20:33:19 -07008007 !(bp->port.supported & SUPPORTED_Autoneg))
David S. Millerc0700f92008-12-16 23:53:20 -08008008 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008009
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008010 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
Eliezer Tamirf1410642008-02-28 11:51:50 -08008011 " advertising 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008012 bp->link_params.req_line_speed,
8013 bp->link_params.req_duplex,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008014 bp->link_params.req_flow_ctrl, bp->port.advertising);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008015}
8016
/* Read per-port hardware configuration from shared memory (shmem):
 * lane/PHY configuration, speed capabilities, per-lane XGXS equalizer
 * settings, option-module enforcement, default WoL state and the port
 * MAC address.  Fills bp->link_params and bp->port, then derives the
 * requested link settings.
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	/* each shmem dword packs two 16-bit lane values: high half is
	 * lane (i << 1), low half is lane (i << 1) + 1 */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			  dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			  dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* optical module enforcement: mirror the NVRAM feature bit into
	 * the link_params feature flags */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/* MAC address is stored in shmem as upper 16 bits + lower 32 bits,
	 * big-endian byte order within each word */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008089
/* Read chip-wide and function-level HW configuration.  Detects E1H
 * multi-function (MF) mode via the E1HOV tag, pulls the per-port info
 * (when an MCP is present) and the per-function MAC override in MF mode.
 * Returns 0 on success or -EPERM when an E1H VN lacks a valid E1HOV tag.
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		/* a non-default outer-VLAN tag means multi-function mode */
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("Single function mode\n");
			/* in single-function mode only VN 0 is valid;
			 * fail - but note rc is only returned at the end,
			 * init below still runs */
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	/* in MF mode each function may carry its own MAC from mf_cfg,
	 * overriding the port MAC read above */
	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
8160
/* One-time software initialization of the bnx2x private state at probe
 * time: reads HW info, sanitizes module parameters (multi-queue, TPA),
 * and sets driver defaults (ring sizes, coalescing, periodic timer).
 * Returns 0 or the negative errno propagated from bnx2x_get_hwinfo().
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	/* NOTE: rc is returned at the end; initialization below continues
	 * even if get_hwinfo failed */
	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	/* RSS multi-queue requires MSI-X; fall back to disabled otherwise */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	/* PCIe maximum read request size (module parameter) */
	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* default interrupt coalescing values (usec) */
	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	/* slow emulation/FPGA chips get a 5x longer timer period;
	 * the "poll" module parameter overrides both */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
8227
8228/*
8229 * ethtool service functions
8230 */
8231
8232/* All ethtool functions called with rtnl_lock */
8233
/* ethtool get_settings handler (called under rtnl_lock).
 * Reports negotiated speed/duplex when the carrier is up, otherwise the
 * requested settings; caps the speed by the per-function bandwidth in
 * multi-function mode and maps the external PHY type to an ethtool port
 * type.  Always returns 0.
 */
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		/* link up - report actual negotiated values */
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		/* link down - report the configured request */
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		/* MF mode: the function's max bandwidth (units of 100Mb/s
		 * in the MF config) caps the reported speed */
		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		/* map external PHY type to ethtool port type */
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			/* cmd->port intentionally left as-is here */
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
8310
/* ethtool set_settings handler (called under rtnl_lock).
 * Validates the requested autoneg/speed/duplex against the port's
 * supported mask, updates link_params and the advertised mask, and
 * re-triggers link configuration if the interface is running.
 * Silently ignored (returns 0) in multi-function mode, where link
 * settings are owned by the management firmware.
 * Returns 0 on success, -EINVAL for unsupported combinations.
 */
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		/* 1G and above support full duplex only */
		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	/* apply the new configuration to the link immediately */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
8461
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008462#define PHY_FW_VER_LEN 10
8463
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008464static void bnx2x_get_drvinfo(struct net_device *dev,
8465 struct ethtool_drvinfo *info)
8466{
8467 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinf0e53a82008-08-13 15:58:30 -07008468 u8 phy_fw_ver[PHY_FW_VER_LEN];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008469
8470 strcpy(info->driver, DRV_MODULE_NAME);
8471 strcpy(info->version, DRV_MODULE_VERSION);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008472
8473 phy_fw_ver[0] = '\0';
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008474 if (bp->port.pmf) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07008475 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008476 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8477 (bp->state != BNX2X_STATE_CLOSED),
8478 phy_fw_ver, PHY_FW_VER_LEN);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07008479 bnx2x_release_phy_lock(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008480 }
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008481
Eilon Greensteinf0e53a82008-08-13 15:58:30 -07008482 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8483 (bp->common.bc_ver & 0xff0000) >> 16,
8484 (bp->common.bc_ver & 0xff00) >> 8,
8485 (bp->common.bc_ver & 0xff),
8486 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008487 strcpy(info->bus_info, pci_name(bp->pdev));
8488 info->n_stats = BNX2X_NUM_STATS;
8489 info->testinfo_len = BNX2X_NUM_TESTS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008490 info->eedump_len = bp->common.flash_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008491 info->regdump_len = 0;
8492}
8493
8494static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8495{
8496 struct bnx2x *bp = netdev_priv(dev);
8497
8498 if (bp->flags & NO_WOL_FLAG) {
8499 wol->supported = 0;
8500 wol->wolopts = 0;
8501 } else {
8502 wol->supported = WAKE_MAGIC;
8503 if (bp->wol)
8504 wol->wolopts = WAKE_MAGIC;
8505 else
8506 wol->wolopts = 0;
8507 }
8508 memset(&wol->sopass, 0, sizeof(wol->sopass));
8509}
8510
8511static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8512{
8513 struct bnx2x *bp = netdev_priv(dev);
8514
8515 if (wol->wolopts & ~WAKE_MAGIC)
8516 return -EINVAL;
8517
8518 if (wol->wolopts & WAKE_MAGIC) {
8519 if (bp->flags & NO_WOL_FLAG)
8520 return -EINVAL;
8521
8522 bp->wol = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008523 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008524 bp->wol = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008525
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008526 return 0;
8527}
8528
8529static u32 bnx2x_get_msglevel(struct net_device *dev)
8530{
8531 struct bnx2x *bp = netdev_priv(dev);
8532
8533 return bp->msglevel;
8534}
8535
8536static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8537{
8538 struct bnx2x *bp = netdev_priv(dev);
8539
8540 if (capable(CAP_NET_ADMIN))
8541 bp->msglevel = level;
8542}
8543
8544static int bnx2x_nway_reset(struct net_device *dev)
8545{
8546 struct bnx2x *bp = netdev_priv(dev);
8547
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008548 if (!bp->port.pmf)
8549 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008550
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008551 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008552 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008553 bnx2x_link_set(bp);
8554 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008555
8556 return 0;
8557}
8558
8559static int bnx2x_get_eeprom_len(struct net_device *dev)
8560{
8561 struct bnx2x *bp = netdev_priv(dev);
8562
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008563 return bp->common.flash_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008564}
8565
/* Request exclusive software arbitration of the NVRAM interface for
 * this port and poll until the grant bit is set.
 * Returns 0 on success, -EBUSY if arbitration is not granted in time.
 */
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	/* poll for the per-port arbitration grant, 5us per iteration */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
8596
/* Release this port's software arbitration of the NVRAM interface and
 * poll until the grant bit clears.
 * Returns 0 on success, -EBUSY if the grant fails to clear in time.
 */
static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	/* poll until the per-port grant bit drops, 5us per iteration */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
8627
8628static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8629{
8630 u32 val;
8631
8632 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8633
8634 /* enable both bits, even on read */
8635 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8636 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8637 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8638}
8639
8640static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8641{
8642 u32 val;
8643
8644 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8645
8646 /* disable both bits, even after read */
8647 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8648 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8649 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8650}
8651
/* Issue a single NVRAM dword read at @offset and poll for completion.
 * @cmd_flags carries FIRST/LAST sequencing bits for multi-dword reads.
 * The result is stored big-endian in *@ret_val (so a byte-wise copy
 * into the user buffer preserves NVRAM byte order).
 * Returns 0 on success, -EBUSY on completion timeout (*ret_val = 0).
 */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
8696
/* Read @buf_size bytes of NVRAM starting at @offset into @ret_buf.
 * Both offset and size must be dword-aligned and within the flash.
 * Acquires the NVRAM arbitration lock and access enable for the whole
 * transfer and reads dword-by-dword with FIRST/LAST sequencing.
 * Returns 0 on success or a negative errno (-EINVAL, -EBUSY).
 */
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	/* only dword-aligned, non-empty transfers are supported */
	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	/* FIRST flag only on the initial dword; intermediate dwords use
	 * no sequencing flags, the final dword gets LAST below */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
8751
8752static int bnx2x_get_eeprom(struct net_device *dev,
8753 struct ethtool_eeprom *eeprom, u8 *eebuf)
8754{
8755 struct bnx2x *bp = netdev_priv(dev);
8756 int rc;
8757
Eilon Greenstein2add3ac2009-01-14 06:44:07 +00008758 if (!netif_running(dev))
8759 return -EAGAIN;
8760
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008761 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008762 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8763 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8764 eeprom->len, eeprom->len);
8765
8766 /* parameters already validated in ethtool_get_eeprom */
8767
8768 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8769
8770 return rc;
8771}
8772
/* Issue a single NVRAM dword write and poll for completion.
 * cmd_flags carries the FIRST/LAST burst markers; val must already be
 * in the byte order NVRAM expects (callers convert before calling).
 * Returns 0 on success or -EBUSY if the DONE bit never appears.
 */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion, polling DONE every 5us */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
8812
/* Bit position (0/8/16/24) of byte `offset` within its aligned dword */
#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008814
/* Write a single byte to NVRAM (the ethtool buf_size == 1 path) by
 * read-modify-write of the aligned dword containing `offset`.
 * Returns 0 on success or a negative errno.
 */
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the enclosing dword as a single FIRST|LAST burst */
	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		/* splice the new byte into its slot within the dword */
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
8860
/* Write buf_size bytes from data_buf to NVRAM at `offset`.
 * A one-byte request (ethtool) is delegated to bnx2x_nvram_write1();
 * otherwise offset and buf_size must be dword-aligned and non-zero.
 * FIRST/LAST command flags are raised at buffer and flash-page
 * boundaries as required by the NVRAM controller.
 * Returns 0 on success or a negative errno.
 */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1) /* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* LAST on the final dword of the buffer or of a flash
		 * page; FIRST again when starting a new page */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
8921
/* ethtool hook: write eeprom->len bytes from eebuf into NVRAM, or, when
 * the magic is 0x00504859 ("PHY" in ASCII), download external PHY
 * firmware instead. Only the PMF may perform the PHY download; the PHY
 * path ORs the component return codes together.
 */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			/* re-initialize the link after downloading new
			 * PHY firmware while the device is up */
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars, 1);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
8963
8964static int bnx2x_get_coalesce(struct net_device *dev,
8965 struct ethtool_coalesce *coal)
8966{
8967 struct bnx2x *bp = netdev_priv(dev);
8968
8969 memset(coal, 0, sizeof(struct ethtool_coalesce));
8970
8971 coal->rx_coalesce_usecs = bp->rx_ticks;
8972 coal->tx_coalesce_usecs = bp->tx_ticks;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008973
8974 return 0;
8975}
8976
8977static int bnx2x_set_coalesce(struct net_device *dev,
8978 struct ethtool_coalesce *coal)
8979{
8980 struct bnx2x *bp = netdev_priv(dev);
8981
8982 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8983 if (bp->rx_ticks > 3000)
8984 bp->rx_ticks = 3000;
8985
8986 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8987 if (bp->tx_ticks > 0x3000)
8988 bp->tx_ticks = 0x3000;
8989
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008990 if (netif_running(dev))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008991 bnx2x_update_coalesce(bp);
8992
8993 return 0;
8994}
8995
8996static void bnx2x_get_ringparam(struct net_device *dev,
8997 struct ethtool_ringparam *ering)
8998{
8999 struct bnx2x *bp = netdev_priv(dev);
9000
9001 ering->rx_max_pending = MAX_RX_AVAIL;
9002 ering->rx_mini_max_pending = 0;
9003 ering->rx_jumbo_max_pending = 0;
9004
9005 ering->rx_pending = bp->rx_ring_size;
9006 ering->rx_mini_pending = 0;
9007 ering->rx_jumbo_pending = 0;
9008
9009 ering->tx_max_pending = MAX_TX_AVAIL;
9010 ering->tx_pending = bp->tx_ring_size;
9011}
9012
9013static int bnx2x_set_ringparam(struct net_device *dev,
9014 struct ethtool_ringparam *ering)
9015{
9016 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009017 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009018
9019 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9020 (ering->tx_pending > MAX_TX_AVAIL) ||
9021 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9022 return -EINVAL;
9023
9024 bp->rx_ring_size = ering->rx_pending;
9025 bp->tx_ring_size = ering->tx_pending;
9026
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009027 if (netif_running(dev)) {
9028 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9029 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009030 }
9031
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009032 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009033}
9034
9035static void bnx2x_get_pauseparam(struct net_device *dev,
9036 struct ethtool_pauseparam *epause)
9037{
9038 struct bnx2x *bp = netdev_priv(dev);
9039
David S. Millerc0700f92008-12-16 23:53:20 -08009040 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009041 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9042
David S. Millerc0700f92008-12-16 23:53:20 -08009043 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9044 BNX2X_FLOW_CTRL_RX);
9045 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9046 BNX2X_FLOW_CTRL_TX);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009047
9048 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9049 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9050 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9051}
9052
/* ethtool hook: set the flow-control (pause) configuration.
 * A no-op (returns 0) in E1H multi-function mode. Builds the requested
 * flow-control mask from the rx/tx flags, maps "neither" to NONE, and
 * restores AUTO only when autoneg is requested and the line speed is
 * also auto. Applies immediately if the interface is running.
 */
static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* per-function pause control is not supported in MF mode */
	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	/* start from AUTO (0 flags) and OR in the requested directions */
	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	/* neither direction requested means pause is explicitly off */
	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		/* autoneg of pause only applies with auto line speed */
		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
9096
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009097static int bnx2x_set_flags(struct net_device *dev, u32 data)
9098{
9099 struct bnx2x *bp = netdev_priv(dev);
9100 int changed = 0;
9101 int rc = 0;
9102
9103 /* TPA requires Rx CSUM offloading */
9104 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9105 if (!(dev->features & NETIF_F_LRO)) {
9106 dev->features |= NETIF_F_LRO;
9107 bp->flags |= TPA_ENABLE_FLAG;
9108 changed = 1;
9109 }
9110
9111 } else if (dev->features & NETIF_F_LRO) {
9112 dev->features &= ~NETIF_F_LRO;
9113 bp->flags &= ~TPA_ENABLE_FLAG;
9114 changed = 1;
9115 }
9116
9117 if (changed && netif_running(dev)) {
9118 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9119 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9120 }
9121
9122 return rc;
9123}
9124
/* ethtool hook: report whether Rx checksum offload is enabled */
static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}
9131
9132static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9133{
9134 struct bnx2x *bp = netdev_priv(dev);
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009135 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009136
9137 bp->rx_csum = data;
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009138
9139 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9140 TPA'ed packets will be discarded due to wrong TCP CSUM */
9141 if (!data) {
9142 u32 flags = ethtool_op_get_flags(dev);
9143
9144 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9145 }
9146
9147 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009148}
9149
9150static int bnx2x_set_tso(struct net_device *dev, u32 data)
9151{
Eilon Greenstein755735eb2008-06-23 20:35:13 -07009152 if (data) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009153 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735eb2008-06-23 20:35:13 -07009154 dev->features |= NETIF_F_TSO6;
9155 } else {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009156 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735eb2008-06-23 20:35:13 -07009157 dev->features &= ~NETIF_F_TSO6;
9158 }
9159
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009160 return 0;
9161}
9162
/* Self-test names reported to ethtool; the order must match the buf[]
 * result indices filled in by bnx2x_self_test(). */
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
9174
/* ethtool hook: number of self-test result slots (see
 * bnx2x_tests_str_arr above) */
static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}
9179
/* Offline self-test: write 0x00000000 and then 0xffffffff to each
 * register in reg_tbl, read it back through its writable-bit mask, and
 * restore the original value. offset1 is the per-port stride added for
 * port 1. Returns 0 if every register echoes the written bits, else
 * -ENODEV (also when the interface is down).
 */
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;	/* address for port 0 */
		u32 offset1;	/* per-port stride */
		u32 mask;	/* bits expected to read back */
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
		{ HC_REG_AGG_INT_0, 4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0, 4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
		{ QM_REG_CONNNUM_0, 4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0, 40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7, 40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }	/* table terminator */
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
9273
/* Offline self-test: read every word of each internal memory in
 * mem_tbl (reads alone trigger parity checking), then verify that no
 * parity-status register reports errors outside its per-chip ignore
 * mask (e1_mask for 57710, e1h_mask for 57711).
 * Returns 0 on success, -ENODEV on parity error or if the interface
 * is down.
 */
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }	/* table terminator */
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;	/* parity bits ignored on E1 */
		u32 e1h_mask;	/* parity bits ignored on E1H */
	} prty_tbl[] = {
		{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
		{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
		{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
		{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
		{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }	/* table terminator */
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
9332
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -07009333static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9334{
9335 int cnt = 1000;
9336
9337 if (link_up)
9338 while (bnx2x_link_test(bp) && cnt--)
9339 msleep(10);
9340}
9341
/* Offline self-test: send one self-addressed packet through the
 * requested loopback (PHY or MAC) on fastpath queue 0 and verify it
 * returns intact on the Rx ring. The caller is expected to have
 * stopped the netif and to hold the PHY lock (see bnx2x_test_loopback).
 * Returns 0 on success or a negative errno.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		/* PHY loopback requires the already-configured
		 * LOOPBACK_XGXS_10 mode; otherwise skip this run */
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet: dst MAC = own MAC, zeroed rest of
	 * header, payload is a byte ramp verified on receive */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet: build a single-BD descriptor and
	 * ring the doorbell by hand */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	/* both tx and rx consumers must have advanced by the one packet */
	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* completion must be an error-free fast-path CQE */
	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* verify the payload ramp survived the round trip */
	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* consume the rx entry regardless of the verdict */
	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
9468
/* Run the PHY and MAC loopback tests back to back with the netif
 * stopped and the PHY lock held. Returns 0 on success or a bitmask of
 * BNX2X_PHY_LOOPBACK_FAILED / BNX2X_MAC_LOOPBACK_FAILED (or
 * BNX2X_LOOPBACK_FAILED when the interface is down).
 */
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}
9496
/* Standard CRC-32 residue: CRC over a region whose trailing dword holds
 * the CRC of the preceding data yields this constant */
#define CRC32_RESIDUAL 0xdebb20e3
9498
/* Online self-test: check the NVRAM bootstrap magic and the embedded
 * CRC32 of each well-known NVRAM region (each region's CRC is stored
 * within it, so the CRC over the full region must equal
 * CRC32_RESIDUAL). Returns 0 on success, negative errno otherwise.
 */
static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{ 0, 0x14 }, /* bootstrap */
		{ 0x14, 0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450, 0xf0 }, /* feature_info */
		{ 0x640, 0x64 }, /* upgrade_key_info */
		{ 0x6a4, 0x64 },
		{ 0x708, 0x70 }, /* manuf_key_info */
		{ 0x778, 0x70 },
		{ 0, 0 }	/* table terminator */
	};
	/* buffer sized for the largest region (manuf_info, 0x350 bytes) */
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		/* region CRC must reduce to the standard residual */
		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
9555
/* Online self-test: post a benign SET_MAC ramrod (zero-length MAC
 * config header) and wait up to ~100ms for its slowpath completion,
 * which verifies that slowpath interrupts are being delivered.
 * Returns 0 on success, -ENODEV on timeout or if the interface is
 * down.
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	/* E1 uses a per-port offset, E1H a per-function one */
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		/* set_mac_pending is presumably cleared by the ramrod
		 * completion handler elsewhere - poll for up to ~100ms */
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
9588
/* ethtool self-test entry point. Fills buf[0..5] with per-test failure
 * indications (order matches bnx2x_tests_str_arr) and sets
 * ETH_TEST_FL_FAILED on any failure. Offline tests (registers, memory,
 * loopback) require reloading the nic in diagnostic mode and are not
 * supported in E1H multi-function mode. Does nothing when the
 * interface is down.
 */
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		/* reload in diagnostic mode for the offline tests */
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* loopback returns a failure bitmask, not just 0/1 */
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* restore normal operation */
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* only the PMF may run the link test */
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
9647
/* Per-queue ethtool statistics layout: 32-bit word offset into
 * the queue's eth_q_stats block, counter width in bytes (4 or 8;
 * 0 = placeholder that is reported as zero) and a printf template
 * for the string name ("%d" is filled with the queue index).
 */
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					 4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					 4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};
9673
/* Global (port/function) ethtool statistics layout: 32-bit word offset
 * into bp->eth_stats, counter width in bytes (4 or 8; 0 = placeholder),
 * scope flags (port-only, function-only, or both - see IS_PORT_STAT /
 * IS_FUNC_STAT below) and the string name.
 */
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
9767
/* A statistic is "port"-scoped when it carries only the PORT flag;
 * "function"-scoped statistics carry the FUNC flag (alone or in BOTH). */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
/* In E1H multi-function mode, port statistics are hidden from ethtool
 * unless the BNX2X_MSG_STATS debug level is enabled. */
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
Yitchak Gertner66e855f2008-08-13 15:49:05 -07009773
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009774static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9775{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009776 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +00009777 int i, j, k;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009778
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009779 switch (stringset) {
9780 case ETH_SS_STATS:
Eilon Greensteinde832a52009-02-12 08:36:33 +00009781 if (is_multi(bp)) {
9782 k = 0;
9783 for_each_queue(bp, i) {
9784 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9785 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9786 bnx2x_q_stats_arr[j].string, i);
9787 k += BNX2X_NUM_Q_STATS;
9788 }
9789 if (IS_E1HMF_MODE_STAT(bp))
9790 break;
9791 for (j = 0; j < BNX2X_NUM_STATS; j++)
9792 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9793 bnx2x_stats_arr[j].string);
9794 } else {
9795 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9796 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9797 continue;
9798 strcpy(buf + j*ETH_GSTRING_LEN,
9799 bnx2x_stats_arr[i].string);
9800 j++;
9801 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009802 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009803 break;
9804
9805 case ETH_SS_TEST:
9806 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9807 break;
9808 }
9809}
9810
9811static int bnx2x_get_stats_count(struct net_device *dev)
9812{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009813 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +00009814 int i, num_stats;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009815
Eilon Greensteinde832a52009-02-12 08:36:33 +00009816 if (is_multi(bp)) {
9817 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9818 if (!IS_E1HMF_MODE_STAT(bp))
9819 num_stats += BNX2X_NUM_STATS;
9820 } else {
9821 if (IS_E1HMF_MODE_STAT(bp)) {
9822 num_stats = 0;
9823 for (i = 0; i < BNX2X_NUM_STATS; i++)
9824 if (IS_FUNC_STAT(i))
9825 num_stats++;
9826 } else
9827 num_stats = BNX2X_NUM_STATS;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009828 }
Eilon Greensteinde832a52009-02-12 08:36:33 +00009829
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009830 return num_stats;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009831}
9832
/* ethtool -S handler: copy the driver statistics into buf in the same
 * order bnx2x_get_strings() emits the names.  Counters with size 0 are
 * placeholders reported as 0; 4-byte counters are zero-extended and
 * 8-byte counters are assembled from their hi/lo 32-bit halves.
 */
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		/* per-queue counters first ... */
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		/* ... then the globals, unless hidden in E1H MF mode */
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		/* single queue: global counters only; port-scoped ones
		 * are filtered out in E1H MF mode */
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
9904
9905static int bnx2x_phys_id(struct net_device *dev, u32 data)
9906{
9907 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009908 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009909 int i;
9910
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009911 if (!netif_running(dev))
9912 return 0;
9913
9914 if (!bp->port.pmf)
9915 return 0;
9916
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009917 if (data == 0)
9918 data = 2;
9919
9920 for (i = 0; i < (data * 2); i++) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009921 if ((i % 2) == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009922 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009923 bp->link_params.hw_led_mode,
9924 bp->link_params.chip_id);
9925 else
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009926 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009927 bp->link_params.hw_led_mode,
9928 bp->link_params.chip_id);
9929
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009930 msleep_interruptible(500);
9931 if (signal_pending(current))
9932 break;
9933 }
9934
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009935 if (bp->link_vars.link_up)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009936 bnx2x_set_led(bp, port, LED_MODE_OPER,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009937 bp->link_vars.line_speed,
9938 bp->link_params.hw_led_mode,
9939 bp->link_params.chip_id);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009940
9941 return 0;
9942}
9943
/* ethtool entry points for the bnx2x driver; callbacks not listed here
 * fall back to the networking core defaults. */
static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};
9980
9981/* end of ethtool_ops */
9982
9983/****************************************************************************
9984* General service functions
9985****************************************************************************/
9986
/* Move the device between PCI power states via the PM_CTRL config
 * register.  Only D0 and D3hot are supported; any other state returns
 * -EINVAL.  Wake-on-LAN (PME) is armed on entry to D3hot when enabled.
 */
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		/* clear the state bits (= select D0) and clear PME status */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;	/* 3 is the D3hot encoding of the state bits */

		/* arm PME so Wake-on-LAN can bring the device back */
		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
10024
Eilon Greenstein237907c2009-01-14 06:42:44 +000010025static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10026{
10027 u16 rx_cons_sb;
10028
10029 /* Tell compiler that status block fields can change */
10030 barrier();
10031 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10032 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10033 rx_cons_sb++;
10034 return (fp->rx_comp_cons != rx_cons_sb);
10035}
10036
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010037/*
10038 * net_device service functions
10039 */
10040
/* NAPI poll handler: service TX completions and up to 'budget' RX
 * packets for one fastpath queue, then - only once all work is done -
 * complete NAPI and re-enable the queue's IGU interrupt.
 *
 * Returns the number of RX packets processed.
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	/* warm up the cache lines the TX/RX loops are about to touch */
	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp, budget);

	if (bnx2x_has_rx_work(fp))
		work_done = bnx2x_rx_int(fp, budget);
	rmb(); /* BNX2X_HAS_WORK() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		/* re-arm the status block: ack both storm indices and
		 * re-enable the IGU interrupt for this queue */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}
10081
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010082
/* We split the first BD of a TSO packet into a headers-only BD and a
 * data BD, to ease the pain of our fellow microcode engineers;
 * we use one DMA mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 *
 * Returns the updated BD producer index; *tx_bd is advanced to the new
 * data BD so the caller can mark it with the "last BD" flag.
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD: truncate it to the header length only */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	/* the data BD points hlen bytes into the same mapping */
	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}
10132
10133static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10134{
10135 if (fix > 0)
10136 csum = (u16) ~csum_fold(csum_sub(csum,
10137 csum_partial(t_header - fix, fix, 0)));
10138
10139 else if (fix < 0)
10140 csum = (u16) ~csum_fold(csum_add(csum,
10141 csum_partial(t_header, -fix, 0)));
10142
10143 return swab16(csum);
10144}
10145
10146static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10147{
10148 u32 rc;
10149
10150 if (skb->ip_summed != CHECKSUM_PARTIAL)
10151 rc = XMIT_PLAIN;
10152
10153 else {
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010154 if (skb->protocol == htons(ETH_P_IPV6)) {
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010155 rc = XMIT_CSUM_V6;
10156 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10157 rc |= XMIT_CSUM_TCP;
10158
10159 } else {
10160 rc = XMIT_CSUM_V4;
10161 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10162 rc |= XMIT_CSUM_TCP;
10163 }
10164 }
10165
10166 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10167 rc |= XMIT_GSO_V4;
10168
10169 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10170 rc |= XMIT_GSO_V6;
10171
10172 return rc;
10173}
10174
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
 *
 * The FW can fetch at most MAX_FETCH_BD BDs per packet, and for LSO
 * every sliding window of fragments covering one MSS must itself fit
 * in (MAX_FETCH_BD - 3) BDs.  Returns non-zero when the skb must be
 * linearized before being handed to the hardware.
 */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}

		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010254
10255/* called with netif_tx_lock
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010256 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010257 * netif_wake_queue()
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010258 */
10259static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10260{
10261 struct bnx2x *bp = netdev_priv(dev);
10262 struct bnx2x_fastpath *fp;
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010263 struct netdev_queue *txq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010264 struct sw_tx_bd *tx_buf;
10265 struct eth_tx_bd *tx_bd;
10266 struct eth_tx_parse_bd *pbd = NULL;
10267 u16 pkt_prod, bd_prod;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010268 int nbd, fp_index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010269 dma_addr_t mapping;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010270 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10271 int vlan_off = (bp->e1hov ? 4 : 0);
10272 int i;
10273 u8 hlen = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010274
10275#ifdef BNX2X_STOP_ON_ERROR
10276 if (unlikely(bp->panic))
10277 return NETDEV_TX_BUSY;
10278#endif
10279
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010280 fp_index = skb_get_queue_mapping(skb);
10281 txq = netdev_get_tx_queue(dev, fp_index);
10282
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010283 fp = &bp->fp[fp_index];
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010284
Yitchak Gertner231fd582008-08-25 15:27:06 -070010285 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
Eilon Greensteinde832a52009-02-12 08:36:33 +000010286 fp->eth_q_stats.driver_xoff++,
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010287 netif_tx_stop_queue(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010288 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10289 return NETDEV_TX_BUSY;
10290 }
10291
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010292 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10293 " gso type %x xmit_type %x\n",
10294 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10295 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10296
Eilon Greenstein632da4d2009-01-14 06:44:10 +000010297#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
Eilon Greenstein33471622008-08-13 15:59:08 -070010298 /* First, check if we need to linearize the skb
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010299 (due to FW restrictions) */
10300 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10301 /* Statistics of linearization */
10302 bp->lin_cnt++;
10303 if (skb_linearize(skb) != 0) {
10304 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10305 "silently dropping this SKB\n");
10306 dev_kfree_skb_any(skb);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -070010307 return NETDEV_TX_OK;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010308 }
10309 }
Eilon Greenstein632da4d2009-01-14 06:44:10 +000010310#endif
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010311
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010312 /*
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010313 Please read carefully. First we use one BD which we mark as start,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010314 then for TSO or xsum we have a parsing info BD,
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010315 and only then we have the rest of the TSO BDs.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010316 (don't forget to mark the last one as last,
10317 and to unmap only AFTER you write to the BD ...)
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010318 And above all, all pdb sizes are in words - NOT DWORDS!
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010319 */
10320
10321 pkt_prod = fp->tx_pkt_prod++;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010322 bd_prod = TX_BD(fp->tx_bd_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010323
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010324 /* get a tx_buf and first BD */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010325 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10326 tx_bd = &fp->tx_desc_ring[bd_prod];
10327
10328 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10329 tx_bd->general_data = (UNICAST_ADDRESS <<
10330 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
Eilon Greenstein3196a882008-08-13 15:58:49 -070010331 /* header nbd */
10332 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010333
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010334 /* remember the first BD of the packet */
10335 tx_buf->first_bd = fp->tx_bd_prod;
10336 tx_buf->skb = skb;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010337
10338 DP(NETIF_MSG_TX_QUEUED,
10339 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10340 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10341
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080010342#ifdef BCM_VLAN
10343 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10344 (bp->flags & HW_VLAN_TX_FLAG)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010345 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10346 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010347 vlan_off += 4;
10348 } else
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080010349#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010350 tx_bd->vlan = cpu_to_le16(pkt_prod);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010351
10352 if (xmit_type) {
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010353 /* turn on parsing and get a BD */
10354 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10355 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10356
10357 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10358 }
10359
10360 if (xmit_type & XMIT_CSUM) {
10361 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10362
10363 /* for now NS flag is not used in Linux */
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010364 pbd->global_data =
10365 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10366 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010367
10368 pbd->ip_hlen = (skb_transport_header(skb) -
10369 skb_network_header(skb)) / 2;
10370
10371 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10372
10373 pbd->total_hlen = cpu_to_le16(hlen);
10374 hlen = hlen*2 - vlan_off;
10375
10376 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10377
10378 if (xmit_type & XMIT_CSUM_V4)
10379 tx_bd->bd_flags.as_bitfield |=
10380 ETH_TX_BD_FLAGS_IP_CSUM;
10381 else
10382 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10383
10384 if (xmit_type & XMIT_CSUM_TCP) {
10385 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10386
10387 } else {
10388 s8 fix = SKB_CS_OFF(skb); /* signed! */
10389
10390 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10391 pbd->cs_offset = fix / 2;
10392
10393 DP(NETIF_MSG_TX_QUEUED,
10394 "hlen %d offset %d fix %d csum before fix %x\n",
10395 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10396 SKB_CS(skb));
10397
10398 /* HW bug: fixup the CSUM */
10399 pbd->tcp_pseudo_csum =
10400 bnx2x_csum_fix(skb_transport_header(skb),
10401 SKB_CS(skb), fix);
10402
10403 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10404 pbd->tcp_pseudo_csum);
10405 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010406 }
10407
10408 mapping = pci_map_single(bp->pdev, skb->data,
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010409 skb_headlen(skb), PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010410
10411 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10412 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eilon Greenstein6378c022008-08-13 15:59:25 -070010413 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010414 tx_bd->nbd = cpu_to_le16(nbd);
10415 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10416
10417 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010418 " nbytes %d flags %x vlan %x\n",
10419 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10420 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10421 le16_to_cpu(tx_bd->vlan));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010422
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010423 if (xmit_type & XMIT_GSO) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010424
10425 DP(NETIF_MSG_TX_QUEUED,
10426 "TSO packet len %d hlen %d total len %d tso size %d\n",
10427 skb->len, hlen, skb_headlen(skb),
10428 skb_shinfo(skb)->gso_size);
10429
10430 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10431
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010432 if (unlikely(skb_headlen(skb) > hlen))
10433 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10434 bd_prod, ++nbd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010435
10436 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10437 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010438 pbd->tcp_flags = pbd_tcp_flags(skb);
10439
10440 if (xmit_type & XMIT_GSO_V4) {
10441 pbd->ip_id = swab16(ip_hdr(skb)->id);
10442 pbd->tcp_pseudo_csum =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010443 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10444 ip_hdr(skb)->daddr,
10445 0, IPPROTO_TCP, 0));
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010446
10447 } else
10448 pbd->tcp_pseudo_csum =
10449 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10450 &ipv6_hdr(skb)->daddr,
10451 0, IPPROTO_TCP, 0));
10452
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010453 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10454 }
10455
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010456 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10457 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010458
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010459 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10460 tx_bd = &fp->tx_desc_ring[bd_prod];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010461
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010462 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10463 frag->size, PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010464
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010465 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10466 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10467 tx_bd->nbytes = cpu_to_le16(frag->size);
10468 tx_bd->vlan = cpu_to_le16(pkt_prod);
10469 tx_bd->bd_flags.as_bitfield = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010470
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010471 DP(NETIF_MSG_TX_QUEUED,
10472 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10473 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10474 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010475 }
10476
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010477 /* now at last mark the BD as the last BD */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010478 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10479
10480 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10481 tx_bd, tx_bd->bd_flags.as_bitfield);
10482
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010483 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10484
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010485 /* now send a tx doorbell, counting the next BD
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010486 * if the packet contains or ends with it
10487 */
10488 if (TX_BD_POFF(bd_prod) < nbd)
10489 nbd++;
10490
10491 if (pbd)
10492 DP(NETIF_MSG_TX_QUEUED,
10493 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10494 " tcp_flags %x xsum %x seq %u hlen %u\n",
10495 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10496 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010497 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010498
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010499 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010500
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -080010501 /*
10502 * Make sure that the BD data is updated before updating the producer
10503 * since FW might read the BD right after the producer is updated.
10504 * This is only applicable for weak-ordered memory model archs such
10505 * as IA-64. The following barrier is also mandatory since FW will
10506 * assumes packets must have BDs.
10507 */
10508 wmb();
10509
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010510 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010511 mb(); /* FW restriction: must not reorder writing nbd and packets */
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010512 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
Eilon Greenstein0626b892009-02-12 08:38:14 +000010513 DOORBELL(bp, fp->index, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010514
10515 mmiowb();
10516
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010517 fp->tx_bd_prod += nbd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010518 dev->trans_start = jiffies;
10519
10520 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -080010521 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10522 if we put Tx into XOFF state. */
10523 smp_mb();
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010524 netif_tx_stop_queue(txq);
Eilon Greensteinde832a52009-02-12 08:36:33 +000010525 fp->eth_q_stats.driver_xoff++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010526 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010527 netif_tx_wake_queue(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010528 }
10529 fp->tx_pkt++;
10530
10531 return NETDEV_TX_OK;
10532}
10533
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010534/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010535static int bnx2x_open(struct net_device *dev)
10536{
10537 struct bnx2x *bp = netdev_priv(dev);
10538
Eilon Greenstein6eccabb2009-01-22 03:37:48 +000010539 netif_carrier_off(dev);
10540
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010541 bnx2x_set_power_state(bp, PCI_D0);
10542
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010543 return bnx2x_nic_load(bp, LOAD_OPEN);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010544}
10545
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010546/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010547static int bnx2x_close(struct net_device *dev)
10548{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010549 struct bnx2x *bp = netdev_priv(dev);
10550
10551 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010552 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10553 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10554 if (!CHIP_REV_IS_SLOW(bp))
10555 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010556
10557 return 0;
10558}
10559
/* called with netif_tx_lock from set_multicast */
/*
 * Derive the Rx filtering mode from dev->flags and program the
 * multicast filters accordingly:
 *  - promiscuous            -> BNX2X_RX_MODE_PROMISC
 *  - allmulti, or an E1 with more MC addresses than CAM entries
 *                           -> BNX2X_RX_MODE_ALLMULTI
 *  - otherwise: E1 programs exact-match CAM entries via a SET_MAC
 *    ramrod, E1H programs a 256-bit CRC32c hash filter in registers.
 * Finally the chosen mode is pushed to the storm FW.
 */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* The CAM/ramrod path is only valid while the NIC is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			/* Fill one CAM entry per multicast address; MAC
			 * bytes are loaded as three big-endian u16 words.
			 */
			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			/* Invalidate any leftover entries from a previous,
			 * longer multicast list.
			 */
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			/* CAM offset depends on emulation vs real silicon */
			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			/* Hand the table to FW via a slow-path ramrod */
			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			/* Hash each MC address: top CRC32c byte selects one
			 * of 256 bits spread across MC_HASH_SIZE registers.
			 */
			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	/* Cache the mode and push it to the storm firmware */
	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
10679
10680/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010681static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10682{
10683 struct sockaddr *addr = p;
10684 struct bnx2x *bp = netdev_priv(dev);
10685
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010686 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010687 return -EINVAL;
10688
10689 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010690 if (netif_running(dev)) {
10691 if (CHIP_IS_E1(bp))
Yitchak Gertner3101c2b2008-08-13 15:52:28 -070010692 bnx2x_set_mac_addr_e1(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010693 else
Yitchak Gertner3101c2b2008-08-13 15:52:28 -070010694 bnx2x_set_mac_addr_e1h(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010695 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010696
10697 return 0;
10698}
10699
/* called with rtnl_lock */
/*
 * MII ioctl handler (.ndo_do_ioctl): serves SIOCGMIIPHY, SIOCGMIIREG
 * and SIOCSMIIREG by reading/writing the external PHY over clause-45
 * MDIO. All PHY accesses are serialized with bp->port.phy_mutex.
 */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		/* PHY is only accessible while the NIC is up */
		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		/* only the low 5 bits form a valid MII register number */
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		/* writing PHY registers requires admin privileges */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}
10750
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010751/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010752static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10753{
10754 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010755 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010756
10757 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10758 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10759 return -EINVAL;
10760
10761 /* This does not race with packet allocation
Eliezer Tamirc14423f2008-02-28 11:49:42 -080010762 * because the actual alloc size is
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010763 * only updated as part of load
10764 */
10765 dev->mtu = new_mtu;
10766
10767 if (netif_running(dev)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010768 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10769 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010770 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010771
10772 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010773}
10774
/*
 * Tx watchdog callback (dev->watchdog_timeo expired with the queue
 * stalled). Recovery is deferred to the reset task so the heavy reset
 * work runs in process context.
 */
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	/* debug builds: freeze chip state for inspection instead of
	 * recovering silently */
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
10786
#ifdef BCM_VLAN
/* .ndo_vlan_rx_register handler; runs under rtnl_lock.
 * Caches the VLAN group and re-derives the HW VLAN offload flags from
 * the current netdev feature bits, then re-programs the client config
 * if the NIC is up.
 */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 vlan_flags = 0;

	bp->vlgrp = vlgrp;

	/* Rebuild the VLAN acceleration flags from dev->features */
	if (dev->features & NETIF_F_HW_VLAN_TX)
		vlan_flags |= HW_VLAN_TX_FLAG;
	if (dev->features & NETIF_F_HW_VLAN_RX)
		vlan_flags |= HW_VLAN_RX_FLAG;

	bp->flags = (bp->flags & ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG)) |
		    vlan_flags;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif
10810
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler with the IRQ line masked so
 * netconsole & friends can make progress without real interrupts.
 */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	unsigned int irq = bp->pdev->irq;

	disable_irq(irq);
	bnx2x_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
10821
/* net_device callbacks for bnx2x; installed by bnx2x_init_dev().
 * VLAN and netpoll entries are compiled in only when the corresponding
 * kernel support (BCM_VLAN / poll controller) is configured.
 */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
10839
10840
/*
 * One-time PCI/netdev bring-up for a bnx2x function: enables the PCI
 * device, claims its two memory BARs (registers + doorbells), sets up
 * DMA masks, maps the BARs, cleans chip indirect-address registers and
 * wires up the net_device ops/feature flags.
 *
 * On failure every step is unwound via the err_out_* labels in reverse
 * acquisition order. Region request/release is done only for the first
 * enabler of the PCI device (enable_cnt == 1) since both ports share it.
 * Returns 0 on success or a negative errno.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	/* PCI function number distinguishes the ports of one chip */
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 = register space, BAR2 = doorbells; both must be MMIO */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* Claim regions/bus-mastering only for the first function that
	 * enables this PCI device */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* Prefer 64-bit DMA (DAC); fall back to 32-bit if unavailable */
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Map at most BNX2X_DB_SIZE of the doorbell BAR */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	/* hook up ops tables and advertise offload features */
	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
10989
Eliezer Tamir25047952008-02-28 11:50:16 -080010990static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10991{
10992 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10993
10994 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10995 return val;
10996}
10997
10998/* return value of 1=2.5GHz 2=5GHz */
10999static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11000{
11001 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11002
11003 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11004 return val;
11005}
11006
/*
 * PCI probe entry point: allocates the multi-queue net_device, runs the
 * PCI/BAR bring-up (bnx2x_init_dev), initializes driver state
 * (bnx2x_init_bp) and registers the netdev. Failures after
 * bnx2x_init_dev() succeeded unwind its mappings/regions via
 * init_one_exit. Returns 0 on success or a negative errno.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;	/* print the banner only once */
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		/* bnx2x_init_dev cleaned up after itself */
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	/* report board, chip revision, PCI-E link and MAC address */
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
11072
/*
 * PCI remove entry point: mirror of bnx2x_init_one. Unregisters the
 * netdev first (stops traffic and callbacks), then unmaps the BARs,
 * frees the netdev and releases the PCI resources for the last user of
 * this PCI device.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* release regions only when the last function disables the device */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
11100
/*
 * PM suspend handler: saves PCI state and, if the interface is up,
 * detaches it from the stack, unloads the NIC and drops the chip to
 * the requested power state. All of it under rtnl_lock to serialize
 * against open/close/reset.
 */
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	/* nothing more to do for an interface that is down */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
11131
/*
 * PM resume callback: restore PCI config state and, if the interface
 * was running at suspend time, power the chip back to D0, re-attach
 * the netdev and reload the NIC.  Returns 0 or the bnx2x_nic_load rc.
 */
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	/* Interface was down across the suspend - nothing to reload */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	/* Back to full power before touching the device */
	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
11162
/*
 * Minimal unload path used after a PCI error has been detected:
 * quiesce the driver state and free all resources without going
 * through the normal (hardware-interacting) unload sequence, since
 * the device may no longer be reachable.  Always returns 0.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	/* Stop the periodic timer and statistics processing */
	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* On E1, mark every multicast CAM configuration entry invalid */
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
			bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
11202
/*
 * Re-establish driver<->MCP bookkeeping after a PCI reset: re-read the
 * shared-memory base address, validate the MCP validity signatures and
 * resynchronize the firmware mailbox sequence number.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* A shmem base of 0 or outside [0xA0000, 0xC0000) is treated as
	 * "no working MCP"; continue with NO_MCP_FLAG set */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	/* Both DEV_INFO and MB validity bits must be set for this port */
	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	/* Resync the driver/firmware mailbox sequence number */
	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
11232
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	/* Keep the stack off the device while it is being recovered */
	netif_device_detach(dev);

	/* Use the error-path unload that avoids touching dead hardware */
	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
11261
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	/* Re-enable the device that error_detected() disabled */
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Power up only if the interface was running before the error */
	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
11292
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	/* Re-read shmem base / MCP state before reloading the NIC */
	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	/* Let the stack use the device again */
	netif_device_attach(dev);

	rtnl_unlock();
}
11316
/* PCI error-recovery (EEH/AER) callbacks */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};
11322
/* PCI driver glue: probe/remove, power management and error recovery */
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
11332
11333static int __init bnx2x_init(void)
11334{
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080011335 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11336 if (bnx2x_wq == NULL) {
11337 printk(KERN_ERR PFX "Cannot create workqueue\n");
11338 return -ENOMEM;
11339 }
11340
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011341 return pci_register_driver(&bnx2x_pci_driver);
11342}
11343
/*
 * Module exit: unregister the PCI driver first (quiesces all devices),
 * then tear down the workqueue created in bnx2x_init.
 */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
11350
/* Module entry/exit registration */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
11353