/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080041#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070045#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020046#include <linux/workqueue.h>
47#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070048#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020049#include <linux/prefetch.h>
50#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020051#include <linux/io.h>
52
Eilon Greenstein359d8b12009-02-12 08:38:25 +000053
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020054#include "bnx2x.h"
55#include "bnx2x_init.h"
56
/* Driver identification strings, reported through MODULE_VERSION/ethtool */
#define DRV_MODULE_VERSION	"1.48.102"
#define DRV_MODULE_RELDATE	"2009/02/12"
/* bootcode (MCP firmware) version constant — presumably the minimum
 * version this driver expects; confirm against where it is compared */
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

/* banner printed once at module load */
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020072
/* Module parameters (all read-only at runtime, perm 0 hides them in sysfs) */

/* multi-queue mode selector; defaults to enabled (1) */
static int multi_mode = 1;
module_param(multi_mode, int, 0);

/* non-zero disables TPA (transparent packet aggregation / LRO) */
static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

/* force a specific interrupt mode instead of auto-selection */
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

/* debug aid: service the device by polling instead of interrupts */
static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

/* debug aid: override the PCIe Max Read Request Size (-1 = don't force) */
static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

/* default debug message level mask */
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

/* reference counts used during load/unload ordering */
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

/* driver-private workqueue for the slowpath task */
static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020099
/* supported ASIC variants; values index board_info[] below */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


/* PCI IDs claimed by this driver; driver_data carries the board_type */
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
127
128/****************************************************************************
129* General service functions
130****************************************************************************/
131
/* Indirect register write through the PCI config-space GRC window.
 * Used only at init; locking is done by the MCP.
 * The three config writes are strictly ordered: select the target
 * address, write the data, then park the window back at the vendor-ID
 * offset so a stray config read cannot hit device registers.
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
142
/* Indirect register read through the PCI config-space GRC window.
 * Mirror of bnx2x_reg_wr_ind(): select address, read data, then park
 * the window at the vendor-ID offset.  Returns the register value.
 */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200154
/* "GO" doorbell register for each of the 16 DMAE command slots,
 * indexed by command slot number (see bnx2x_post_dmae())
 */
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
161
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	/* each command slot is sizeof(struct dmae_command) bytes wide */
	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	/* copy the command dword by dword into the device command memory */
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	/* ring the slot's GO doorbell to start execution */
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
178
/* DMA len32 dwords from host memory (dma_addr) to device GRC space
 * (dst_addr) using the DMAE engine, then busy-wait for the completion
 * value the engine writes back to the slowpath wb_comp word.
 * Falls back to indirect register writes while DMAE is not yet ready.
 * Serialized by bp->dmae_mutex; may sleep (mutex/msleep) — do not call
 * from atomic context.
 */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;		/* completion poll budget */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	/* PCI -> GRC copy; completion is written back to PCI (wb_comp) */
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;	/* GRC address in dwords */
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* arm the completion word before kicking the engine */
	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* poll until the engine writes DMAE_COMP_VAL, or the budget runs out */
	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
252
/* DMA len32 dwords from device GRC space (src_addr) into the slowpath
 * wb_data scratch area using the DMAE engine, then busy-wait for
 * completion (mirror image of bnx2x_write_dmae()).
 * Falls back to indirect register reads while DMAE is not yet ready.
 * Serialized by bp->dmae_mutex; may sleep — do not call from atomic
 * context.  Note: reads at most 4 dwords land in wb_data as used here.
 */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;		/* completion poll budget */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	/* GRC -> PCI copy; completion is written back to PCI (wb_comp) */
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;	/* GRC address in dwords */
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	/* arm the completion word before kicking the engine */
	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* poll until the engine writes DMAE_COMP_VAL, or the budget runs out */
	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200327
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700328/* used only for slowpath so not inlined */
329static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
330{
331 u32 wb_write[2];
332
333 wb_write[0] = val_hi;
334 wb_write[1] = val_lo;
335 REG_WR_DMAE(bp, reg, wb_write, 2);
336}
337
#ifdef USE_WB_RD
/* Read a wide-bus (2-dword) register via DMAE and fold it into a u64. */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 pair[2];

	REG_RD_DMAE(bp, reg, pair, 2);
	return HILO_U64(pair[0], pair[1]);
}
#endif
348
/* Scan the assert lists of the four storm microcontrollers (X/T/C/U)
 * and print every recorded assert entry (four dwords each).  Each list
 * is terminated by an entry whose first dword is the "invalid assert"
 * opcode.  Returns the total number of asserts found.
 * NOTE: the four sections are intentionally identical except for the
 * per-storm BAR and offset macros, which are function-like macros and
 * so cannot be factored into a common helper easily.
 */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800469
/* Dump the MCP firmware's circular trace buffer from scratchpad memory
 * to the kernel log.  The current write mark is read from the scratch
 * area, dword-aligned, and the buffer is printed in two passes (mark
 * to end, then start to mark) so output comes out in chronological
 * order.  Each 8-dword chunk is byte-swapped to big-endian so it reads
 * as ASCII text, with data[8] acting as the NUL terminator.
 */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);	/* round up to dword alignment */
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;	/* terminate so the chunk prints as a string */
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;	/* terminate so the chunk prints as a string */
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
496
/* Crash-time diagnostic dump: prints the slowpath/default indices, the
 * per-queue Rx and Tx indices, and a window of each Rx/Tx ring around
 * the current consumer positions, then chains to the firmware trace
 * dump and the storm assert scan.  Statistics are disabled first so
 * the stats state machine does not race with the dump.
 */
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("queue[%d]: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx: dump a window of 10 entries before / ~500 after the consumer */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx: dump a window around the Tx consumer positions */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
609
/* Program the HC (host coalescing) config register for this port to
 * enable interrupt delivery in the currently selected mode (MSI-X,
 * MSI or INTx), and on E1H additionally set up the leading/trailing
 * edge attention masks.
 * In the INTx branch the register is written twice: first with the
 * MSI/MSI-X enable bit set, then with it cleared — preserved as-is;
 * presumably a hardware programming requirement, confirm before
 * changing.
 */
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
661
/* Mask all interrupt sources in the HC config register for this port.
 * Outstanding MMIO writes are flushed (mmiowb) before the disable is
 * written, and the register is read back to verify the write landed.
 */
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* clear every interrupt-enable bit: single ISR, MSI/MSI-X,
	 * INTx line and attention bits */
	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");

}
684
/* Disable interrupt processing and wait for every in-flight handler to
 * finish: bump intr_sem so handlers bail out, optionally mask the HW
 * (disable_hw), synchronize against all IRQ vectors in use, and make
 * sure the slowpath work item is not running.  May sleep.
 */
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is the slowpath; fastpath vectors follow */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
709
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700710/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200711
712/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700713 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200714 */
715
/* Acknowledge a status block to the IGU: builds an igu_ack_register
 * from the storm id, status-block id, index, operation and update
 * flag, and writes it as a single dword to the per-port HC interrupt
 * acknowledge command register.
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	/* the whole struct is exactly one dword; write it as such */
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
734
735static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
736{
737 struct host_status_block *fpsb = fp->status_blk;
738 u16 rc = 0;
739
740 barrier(); /* status block is written to by the chip */
741 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
742 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
743 rc |= 1;
744 }
745 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
746 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
747 rc |= 2;
748 }
749 return rc;
750}
751
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200752static u16 bnx2x_ack_int(struct bnx2x *bp)
753{
Eilon Greenstein5c862842008-08-13 15:51:48 -0700754 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
755 COMMAND_REG_SIMD_MASK);
756 u32 result = REG_RD(bp, hc_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200757
Eilon Greenstein5c862842008-08-13 15:51:48 -0700758 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
759 result, hc_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200760
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200761 return result;
762}
763
764
765/*
766 * fast path service functions
767 */
768
Eilon Greenstein237907c2009-01-14 06:42:44 +0000769static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
770{
771 u16 tx_cons_sb;
772
773 /* Tell compiler that status block fields can change */
774 barrier();
775 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -0800776 return (fp->tx_pkt_cons != tx_cons_sb);
777}
778
779static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
780{
781 /* Tell compiler that consumer and producer can change */
782 barrier();
783 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
Eilon Greenstein237907c2009-01-14 06:42:44 +0000784}
785
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 *
 * Walks the chain of BDs belonging to one transmitted packet: unmaps
 * the first (single-mapped) BD, skips the parse BD and the optional TSO
 * split-header BD (neither carries a DMA mapping of its own), unmaps
 * every fragment BD, and finally releases the skb and clears the
 * software ring entry.  Returns the new BD-ring consumer index.
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	/* nbd counts the packet's remaining BDs after the first one
	 * (the first BD's nbd field holds the total for the packet) */
	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
853
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700854static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200855{
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700856 s16 used;
857 u16 prod;
858 u16 cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200859
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700860 barrier(); /* Tell compiler that prod and cons can change */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200861 prod = fp->tx_bd_prod;
862 cons = fp->tx_bd_cons;
863
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700864 /* NUM_TX_RINGS = number of "next-page" entries
865 It will be used as a threshold */
866 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200867
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700868#ifdef BNX2X_STOP_ON_ERROR
Ilpo Järvinen53e5e962008-07-25 21:40:45 -0700869 WARN_ON(used < 0);
870 WARN_ON(used > fp->bp->tx_ring_size);
871 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700872#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200873
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700874 return (s16)(fp->bp->tx_ring_size) - used;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200875}
876
/* TX completion handler for one fastpath ring: reclaims every packet
 * the chip reports completed (freeing its BDs and skb), then - if the
 * corresponding netdev TX queue was stopped - wakes it once enough ring
 * space is available again.
 */
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	/* reclaim all packets between the driver's consumer and the
	 * chip-reported consumer */
	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		/* wake only when the device is open and there is room for a
		 * maximally-fragmented packet */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}
937
Eilon Greenstein3196a882008-08-13 15:58:49 -0700938
/* Handle a slow-path (ramrod) completion delivered on a fastpath RCQ.
 * Returns a credit to the slow-path queue and advances either the
 * per-fastpath state machine (non-leading queues, fp->index != 0) or
 * the global driver state machine, keyed on (command | state).
 */
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* one slow-path queue slot freed by this completion */
	bp->spq_left++;

	if (fp->index) {
		/* non-leading (MULTI) queue: only setup/halt transitions */
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	/* leading queue: transitions of the global driver state */
	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
1012
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001013static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1014 struct bnx2x_fastpath *fp, u16 index)
1015{
1016 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1017 struct page *page = sw_buf->page;
1018 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1019
1020 /* Skip "next page" elements */
1021 if (!page)
1022 return;
1023
1024 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001025 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001026 __free_pages(page, PAGES_PER_SGE_SHIFT);
1027
1028 sw_buf->page = NULL;
1029 sge->addr_hi = 0;
1030 sge->addr_lo = 0;
1031}
1032
/* Free RX SGE entries [0, last). */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int idx;

	for (idx = 0; idx < last; idx++)
		bnx2x_free_rx_sge(bp, fp, idx);
}
1041
1042static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1043 struct bnx2x_fastpath *fp, u16 index)
1044{
1045 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1046 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1047 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1048 dma_addr_t mapping;
1049
1050 if (unlikely(page == NULL))
1051 return -ENOMEM;
1052
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001053 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001054 PCI_DMA_FROMDEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001055 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001056 __free_pages(page, PAGES_PER_SGE_SHIFT);
1057 return -ENOMEM;
1058 }
1059
1060 sw_buf->page = page;
1061 pci_unmap_addr_set(sw_buf, mapping, mapping);
1062
1063 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1064 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1065
1066 return 0;
1067}
1068
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001069static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1070 struct bnx2x_fastpath *fp, u16 index)
1071{
1072 struct sk_buff *skb;
1073 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1074 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1075 dma_addr_t mapping;
1076
1077 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1078 if (unlikely(skb == NULL))
1079 return -ENOMEM;
1080
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07001081 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001082 PCI_DMA_FROMDEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001083 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001084 dev_kfree_skb(skb);
1085 return -ENOMEM;
1086 }
1087
1088 rx_buf->skb = skb;
1089 pci_unmap_addr_set(rx_buf, mapping, mapping);
1090
1091 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1092 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1093
1094 return 0;
1095}
1096
1097/* note that we are not allocating a new skb,
1098 * we are just moving one from cons to prod
1099 * we are not creating a new mapping,
1100 * so there is no need to check for dma_mapping_error().
1101 */
1102static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1103 struct sk_buff *skb, u16 cons, u16 prod)
1104{
1105 struct bnx2x *bp = fp->bp;
1106 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1107 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1108 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1109 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1110
1111 pci_dma_sync_single_for_device(bp->pdev,
1112 pci_unmap_addr(cons_rx_buf, mapping),
Eilon Greenstein87942b42009-02-12 08:36:49 +00001113 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001114
1115 prod_rx_buf->skb = cons_rx_buf->skb;
1116 pci_unmap_addr_set(prod_rx_buf, mapping,
1117 pci_unmap_addr(cons_rx_buf, mapping));
1118 *prod_bd = *cons_bd;
1119}
1120
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001121static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1122 u16 idx)
1123{
1124 u16 last_max = fp->last_max_sge;
1125
1126 if (SUB_S16(idx, last_max) > 0)
1127 fp->last_max_sge = idx;
1128}
1129
1130static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1131{
1132 int i, j;
1133
1134 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1135 int idx = RX_SGE_CNT * i - 1;
1136
1137 for (j = 0; j < 2; j++) {
1138 SGE_MASK_CLEAR_BIT(fp, idx);
1139 idx--;
1140 }
1141 }
1142}
1143
/* Account for the SGE pages consumed by one aggregated packet: clears
 * the mask bit of every SGE listed in the CQE's SGL, then advances
 * fp->rx_sge_prod over every leading mask element that became fully
 * consumed (zero), re-arming those elements to all-ones.
 */
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	/* number of SGE pages used beyond the data carried on the BD */
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		/* element fully consumed - re-arm it and move the
		 * producer past it */
		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
1196
1197static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1198{
1199 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1200 memset(fp->sge_mask, 0xff,
1201 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1202
Eilon Greenstein33471622008-08-13 15:59:08 -07001203 /* Clear the two last indices in the page to 1:
1204 these are the indices that correspond to the "next" element,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001205 hence will never be indicated and should be removed from
1206 the calculations. */
1207 bnx2x_clear_sge_mask_next_elems(fp);
1208}
1209
/* Open a TPA aggregation bin: parks the partially-received skb from the
 * consumer BD into tpa_pool[queue] (still DMA-mapped) and installs the
 * pool's spare skb on the producer BD so the chip can keep receiving.
 */
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	/* debug bookkeeping: track which bins are active */
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
1248
/* Attach the SGE pages listed in the CQE's SGL to @skb as page
 * fragments, replenishing each consumed SGE slot with a fresh page.
 * Returns 0 on success or a negative errno if a substitute page could
 * not be allocated (the packet is then dropped by the caller).
 */
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	/* bytes carried in the SGEs = total packet minus the BD part */
	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		/* keep a copy: the slot is about to be refilled */
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
1314
/* Close a TPA aggregation bin: takes the aggregated skb out of
 * tpa_pool[queue], recomputes its IP header checksum, attaches the SGE
 * page fragments and passes it up the stack; the bin is refilled with a
 * freshly allocated skb, or the packet is dropped if that allocation
 * fails.  In all cases the bin ends in BNX2X_TPA_STOP state.
 */
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			/* recompute the IP checksum over the merged header */
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
1404
/* Publish new RX BD/CQE/SGE producer values to the chip's USTORM
 * internal memory.  The barrier ordering here is mandatory - see the
 * inline comment below.
 */
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	/* the producer structure is written out one u32 at a time */
	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
1439
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001440static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1441{
1442 struct bnx2x *bp = fp->bp;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001443 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001444 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1445 int rx_pkt = 0;
1446
1447#ifdef BNX2X_STOP_ON_ERROR
1448 if (unlikely(bp->panic))
1449 return 0;
1450#endif
1451
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001452 /* CQ "next element" is of the size of the regular element,
1453 that's why it's ok here */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001454 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1455 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1456 hw_comp_cons++;
1457
1458 bd_cons = fp->rx_bd_cons;
1459 bd_prod = fp->rx_bd_prod;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001460 bd_prod_fw = bd_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001461 sw_comp_cons = fp->rx_comp_cons;
1462 sw_comp_prod = fp->rx_comp_prod;
1463
1464 /* Memory barrier necessary as speculative reads of the rx
1465 * buffer can be ahead of the index in the status block
1466 */
1467 rmb();
1468
1469 DP(NETIF_MSG_RX_STATUS,
1470 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
Eilon Greenstein0626b892009-02-12 08:38:14 +00001471 fp->index, hw_comp_cons, sw_comp_cons);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001472
1473 while (sw_comp_cons != hw_comp_cons) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001474 struct sw_rx_bd *rx_buf = NULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001475 struct sk_buff *skb;
1476 union eth_rx_cqe *cqe;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001477 u8 cqe_fp_flags;
1478 u16 len, pad;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001479
1480 comp_ring_cons = RCQ_BD(sw_comp_cons);
1481 bd_prod = RX_BD(bd_prod);
1482 bd_cons = RX_BD(bd_cons);
1483
1484 cqe = &fp->rx_comp_ring[comp_ring_cons];
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001485 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001486
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001487 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001488 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1489 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
Eilon Greenstein68d59482009-01-14 21:27:36 -08001490 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001491 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1492 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001493
1494 /* is this a slowpath msg? */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001495 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001496 bnx2x_sp_event(fp, cqe);
1497 goto next_cqe;
1498
1499 /* this is an rx packet */
1500 } else {
1501 rx_buf = &fp->rx_buf_ring[bd_cons];
1502 skb = rx_buf->skb;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001503 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1504 pad = cqe->fast_path_cqe.placement_offset;
1505
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001506 /* If CQE is marked both TPA_START and TPA_END
1507 it is a non-TPA CQE */
1508 if ((!fp->disable_tpa) &&
1509 (TPA_TYPE(cqe_fp_flags) !=
1510 (TPA_TYPE_START | TPA_TYPE_END))) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07001511 u16 queue = cqe->fast_path_cqe.queue_index;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001512
1513 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1514 DP(NETIF_MSG_RX_STATUS,
1515 "calling tpa_start on queue %d\n",
1516 queue);
1517
1518 bnx2x_tpa_start(fp, queue, skb,
1519 bd_cons, bd_prod);
1520 goto next_rx;
1521 }
1522
1523 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1524 DP(NETIF_MSG_RX_STATUS,
1525 "calling tpa_stop on queue %d\n",
1526 queue);
1527
1528 if (!BNX2X_RX_SUM_FIX(cqe))
1529 BNX2X_ERR("STOP on none TCP "
1530 "data\n");
1531
1532 /* This is a size of the linear data
1533 on this skb */
1534 len = le16_to_cpu(cqe->fast_path_cqe.
1535 len_on_bd);
1536 bnx2x_tpa_stop(bp, fp, queue, pad,
1537 len, cqe, comp_ring_cons);
1538#ifdef BNX2X_STOP_ON_ERROR
1539 if (bp->panic)
1540 return -EINVAL;
1541#endif
1542
1543 bnx2x_update_sge_prod(fp,
1544 &cqe->fast_path_cqe);
1545 goto next_cqe;
1546 }
1547 }
1548
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001549 pci_dma_sync_single_for_device(bp->pdev,
1550 pci_unmap_addr(rx_buf, mapping),
1551 pad + RX_COPY_THRESH,
1552 PCI_DMA_FROMDEVICE);
1553 prefetch(skb);
1554 prefetch(((char *)(skb)) + 128);
1555
1556 /* is this an error packet? */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001557 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001558 DP(NETIF_MSG_RX_ERR,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001559 "ERROR flags %x rx packet %u\n",
1560 cqe_fp_flags, sw_comp_cons);
Eilon Greensteinde832a52009-02-12 08:36:33 +00001561 fp->eth_q_stats.rx_err_discard_pkt++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001562 goto reuse_rx;
1563 }
1564
1565 /* Since we don't have a jumbo ring
1566 * copy small packets if mtu > 1500
1567 */
1568 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1569 (len <= RX_COPY_THRESH)) {
1570 struct sk_buff *new_skb;
1571
1572 new_skb = netdev_alloc_skb(bp->dev,
1573 len + pad);
1574 if (new_skb == NULL) {
1575 DP(NETIF_MSG_RX_ERR,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001576 "ERROR packet dropped "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001577 "because of alloc failure\n");
Eilon Greensteinde832a52009-02-12 08:36:33 +00001578 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001579 goto reuse_rx;
1580 }
1581
1582 /* aligned copy */
1583 skb_copy_from_linear_data_offset(skb, pad,
1584 new_skb->data + pad, len);
1585 skb_reserve(new_skb, pad);
1586 skb_put(new_skb, len);
1587
1588 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1589
1590 skb = new_skb;
1591
1592 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1593 pci_unmap_single(bp->pdev,
1594 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07001595 bp->rx_buf_size,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001596 PCI_DMA_FROMDEVICE);
1597 skb_reserve(skb, pad);
1598 skb_put(skb, len);
1599
1600 } else {
1601 DP(NETIF_MSG_RX_ERR,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001602 "ERROR packet dropped because "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001603 "of alloc failure\n");
Eilon Greensteinde832a52009-02-12 08:36:33 +00001604 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001605reuse_rx:
1606 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1607 goto next_rx;
1608 }
1609
1610 skb->protocol = eth_type_trans(skb, bp->dev);
1611
1612 skb->ip_summed = CHECKSUM_NONE;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07001613 if (bp->rx_csum) {
Eilon Greenstein1adcd8b2008-08-13 15:48:29 -07001614 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1615 skb->ip_summed = CHECKSUM_UNNECESSARY;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07001616 else
Eilon Greensteinde832a52009-02-12 08:36:33 +00001617 fp->eth_q_stats.hw_csum_err++;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07001618 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001619 }
1620
Eilon Greenstein748e5432009-02-12 08:36:37 +00001621 skb_record_rx_queue(skb, fp->index);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001622#ifdef BCM_VLAN
Eilon Greenstein0c6671b2009-01-14 21:26:51 -08001623 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001624 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1625 PARSING_FLAGS_VLAN))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001626 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1627 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1628 else
1629#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001630 netif_receive_skb(skb);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001631
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001632
1633next_rx:
1634 rx_buf->skb = NULL;
1635
1636 bd_cons = NEXT_RX_IDX(bd_cons);
1637 bd_prod = NEXT_RX_IDX(bd_prod);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001638 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1639 rx_pkt++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001640next_cqe:
1641 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1642 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001643
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001644 if (rx_pkt == budget)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001645 break;
1646 } /* while */
1647
1648 fp->rx_bd_cons = bd_cons;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001649 fp->rx_bd_prod = bd_prod_fw;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001650 fp->rx_comp_cons = sw_comp_cons;
1651 fp->rx_comp_prod = sw_comp_prod;
1652
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001653 /* Update producers */
1654 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1655 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001656
1657 fp->rx_pkt += rx_pkt;
1658 fp->rx_calls++;
1659
1660 return rx_pkt;
1661}
1662
/* MSI-X fastpath interrupt handler - one vector per RX/TX queue pair.
 * Acks the queue's status block with IGU_INT_DISABLE so no further
 * interrupts fire for this SB, then defers all work to NAPI polling.
 */
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* warm the cache lines the NAPI poll routine will touch first */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
1693
/* INTA/MSI interrupt handler - a single vector shared by the slowpath
 * and fastpath queue 0.  bnx2x_ack_int() returns an aggregated status
 * word: bit 0 is the default (slowpath) status block, and fastpath
 * queue 0 maps to bit (0x2 << sb_id).  Unrecognized bits are logged.
 */
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* fastpath queue 0 work -> NAPI */
	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	/* slowpath events are handled in process context via the workqueue */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
1747
1748/* end of fast path */
1749
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001750static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001751
1752/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001753
1754/*
1755 * General service functions
1756 */
1757
/* Acquire one of the MISC driver HW resource locks shared between the
 * PCI functions.  Polls every 5ms for up to 5 seconds.
 * Returns 0 on success, -EINVAL for a bad resource number, -EEXIST if
 * this function already holds the lock, -EAGAIN on timeout.
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* functions 0-5 and 6-7 use separate banks of control registers */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock: write the bit to the "set"
		   register (base + 4), then read back to see if it stuck */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
1802
/* Release a MISC driver HW resource lock previously obtained with
 * bnx2x_acquire_hw_lock().  Returns 0 on success, -EINVAL for a bad
 * resource number, -EFAULT if the lock was not actually held.
 */
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* functions 0-5 and 6-7 use separate banks of control registers */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	/* write the bit to the base register to free the lock
	   (the set side uses base + 4; see bnx2x_acquire_hw_lock) */
	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
1836
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	/* serialize PHY access within this driver instance first */
	mutex_lock(&bp->port.phy_mutex);

	/* when the MDIO bus is shared with another function, also take
	   the chip-level MDIO HW lock */
	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}
1845
/* Counterpart of bnx2x_acquire_phy_lock(): drop the chip-level MDIO
 * HW lock (if taken) and then the local PHY mutex, in reverse order
 * of acquisition.
 */
static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
1853
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001854int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1855{
1856 /* The GPIO should be swapped if swap register is set and active */
1857 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1858 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1859 int gpio_shift = gpio_num +
1860 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1861 u32 gpio_mask = (1 << gpio_shift);
1862 u32 gpio_reg;
1863 int value;
1864
1865 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1866 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1867 return -EINVAL;
1868 }
1869
1870 /* read GPIO value */
1871 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1872
1873 /* get the requested pin value */
1874 if ((gpio_reg & gpio_mask) == gpio_mask)
1875 value = 1;
1876 else
1877 value = 0;
1878
1879 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1880
1881 return value;
1882}
1883
/* Configure a GPIO pin: drive it low, drive it high, or float it
 * (input / hi-Z).  The read-modify-write of MISC_REG_GPIO is done
 * under the shared GPIO HW lock.  Returns 0 or -EINVAL.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: write the register back unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1936
/* Set or clear the interrupt-output state of a GPIO pin via
 * MISC_REG_GPIO_INT, under the shared GPIO HW lock.
 * Returns 0 or -EINVAL for a bad pin number.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		/* unknown mode: write the register back unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1982
/* Configure an SPIO pin (low / high / float).  Only SPIO 4-7 are
 * software-controllable here; the read-modify-write of MISC_REG_SPIO
 * is done under the shared SPIO HW lock.  Returns 0 or -EINVAL.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: write the register back unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
2028
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002029static void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002030{
Eilon Greensteinad33ea32009-01-14 21:24:57 -08002031 switch (bp->link_vars.ieee_fc &
2032 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002033 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002034 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002035 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002036 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002037
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002038 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002039 bp->port.advertising |= (ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002040 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002041 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002042
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002043 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002044 bp->port.advertising |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08002045 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002046
Eliezer Tamirf1410642008-02-28 11:51:50 -08002047 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002048 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002049 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002050 break;
2051 }
2052}
2053
/* Report the current link state to the kernel log and toggle the
 * net-device carrier accordingly.  The multi-printk sequence builds a
 * single logical "Link is Up, <speed> <duplex>[, flow control]" line.
 */
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
2086
/* Initial link bring-up through the link code, MCP permitting.
 * @load_mode: LOAD_* mode; LOAD_DIAG selects XGXS-10G loopback.
 * Returns the bnx2x_phy_init() result, or -EINVAL when there is no
 * bootcode to drive the PHY.
 */
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		/* on slow (emulation/FPGA) chips the link may already be
		   up at this point - report it right away */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
2123
/* (Re)apply the current link request parameters via the link code and
 * refresh the advertised flow-control bits.  Requires bootcode (MCP).
 */
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
2135
/* Reset the link (PHY lock held across the call).  Requires bootcode;
 * the final argument of bnx2x_link_reset() is 1 here (its semantics
 * are defined in the link code - not visible in this file).
 */
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
2145
2146static u8 bnx2x_link_test(struct bnx2x *bp)
2147{
2148 u8 rc;
2149
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002150 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002151 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002152 bnx2x_release_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002153
2154 return rc;
2155}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002156
/* Compute the per-port rate-shaping and fairness parameters from the
 * current line speed and store them in bp->cmng.
 * NOTE(review): divides by bp->link_vars.line_speed - assumes callers
 * only invoke this with a non-zero (link-up) speed; confirm.
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
2191
/* Program per-VN (per-function) rate-shaping and fairness parameters
 * into XSTORM internal memory, based on the shmem multi-function
 * configuration for @func (absolute function number).
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory - both structs are written to the
	   storm word-by-word */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
2256
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002257
/* This function is called upon link interrupt: it re-reads the link state,
 * updates flow control and statistics accordingly, reports the new status,
 * and - in multi-function (E1HMF) mode - notifies the other functions on
 * the same port and reprograms the rate shaping/fairness contexts.
 */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			/* tell the USTORM firmware whether TX pause is on */
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			/* func encoding: (vn << 1) | port */
			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
2332
/* Refresh the cached link status (outside of the interrupt path) and
 * propagate it to the statistics state machine and to the user via
 * bnx2x_link_report().  No-op unless the device is in OPEN state.
 */
static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}
2348
/* Take over the Port Management Function (PMF) role for this function:
 * mark ourselves as PMF, enable NIG attention for our VN in the HC
 * trailing/leading edge registers, and kick the statistics machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2364
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002365/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002366
2367/* slow path */
2368
2369/*
2370 * General service functions
2371 */
2372
/* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post one slow-path queue entry (ramrod) under spq_lock and notify the
 * XSTORM firmware of the new producer index.
 * Returns 0 on success, -EIO when panicked (BNX2X_STOP_ON_ERROR),
 * -EBUSY when the SPQ ring is full (also triggers bnx2x_panic()).
 */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	/* advance producer, wrapping back to the start of the ring */
	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	/* publish the new producer index to the firmware */
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2429
/* acquire split MCP access lock register */
/* Poll (up to ~5s: 1000 iterations x 5ms) writing and re-reading bit 31 of
 * the lock register at GRCBASE_MCP + 0x9c until the hardware grants it.
 * Returns 0 on success, -EBUSY on timeout.  May sleep.
 */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		/* request the lock by writing bit 31 ... */
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		/* ... and read it back: the bit stays set once granted */
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}
2454
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002455/* release split MCP access lock register */
2456static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002457{
2458 u32 val = 0;
2459
2460 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2461}
2462
/* Compare the cached default-status-block indices against the copy the chip
 * DMAs into host memory, refresh the stale ones, and return a bitmask of
 * what changed: 1=attention, 2=CSTORM, 4=USTORM, 8=XSTORM, 16=TSTORM.
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
2491
2492/*
2493 * slow path service functions
2494 */
2495
/* Handle newly-asserted attention bits: mask them in the AEU, record them in
 * bp->attn_state, service the hard-wired sources (NIG/link, SW timers, GPIOs,
 * general attentions), then write the set to the HC "attn bits set" register.
 * The NIG interrupt is masked around bnx2x_link_attn() and restored at the
 * end, under the PHY lock.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	/* a bit asserted twice without a deassert in between is a bug */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* mask the newly asserted bits in the AEU under the HW lock
	   (shared with the MCP / other port) */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1;
		   acknowledge by clearing the corresponding register */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2591
/* Service deasserted attention group 0: SPIO5 (fan failure on SFX7101
 * boards - shuts the PHY down and records the failure in shmem), GPIO3
 * module-detect interrupts, and fatal HW-block attentions (panics).
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* disable further SPIO5 attentions for this function */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			/* persist the failure mark in shared memory */
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the fatal bits before panicking */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
2659
/* Service deasserted attention group 1: doorbell queue (DORQ) interrupts
 * and fatal HW-block attentions from set 1 (masks them, then panics).
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* reading the STS_CLR register also clears the status */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the fatal bits before panicking */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
2690
/* Service deasserted attention group 2: CFC and PXP block interrupts and
 * fatal HW-block attentions from set 2 (masks them, then panics).
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		/* reading the STS_CLR register also clears the status */
		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the fatal bits before panicking */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
2730
/* Service deasserted attention group 3: general attentions (PMF-role link
 * sync, microcode/MCP asserts - which panic or dump firmware) and latched
 * attentions (GRC timeout / reserved, then clears the latch).
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* ack the link-sync attention, refresh link status,
			   and take over PMF role if the MCP says so */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* the timeout-attn register exists on E1H only */
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
2780
/* Handle deasserted attention bits: under the MCP access lock, read the
 * after-invert AEU signals, dispatch each deasserted group to its handler
 * (3, 1, 2, 0 - in that order), flag parity errors, acknowledge the bits
 * in the HC, and unmask them again in the AEU / bp->attn_state.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	/* acknowledge the deasserted bits towards the HC */
	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	/* a bit deasserted without being tracked as asserted is a bug */
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* unmask the handled bits in the AEU under the shared HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
2859
/* Top-level attention dispatcher: compare the attention bits / acks from
 * the default status block with the driver's cached attn_state, and route
 * newly asserted vs. newly deasserted bits to the respective handlers.
 */
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	/* a bit where bits==ack but !=state means we lost track of it */
	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
2887
/* Slow-path deferred work (scheduled from bnx2x_msix_sp_int): refresh the
 * default status block indices, handle HW attentions if flagged, then ack
 * each storm's index back to the IGU - re-enabling the interrupt only on
 * the final (TSTORM) ack.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;


	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}
2922
/* MSI-X slow-path interrupt handler: disables the IGU interrupt and defers
 * the actual work to bnx2x_sp_task() on the driver workqueue.
 */
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* disable further slow-path interrupts until sp_task re-enables */
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
2945
2946/* end of slow path */
2947
2948/* Statistics */
2949
2950/****************************************************************************
2951* Macros
2952****************************************************************************/
2953
/* sum[hi:lo] += add[hi:lo] - 64-bit add on split 32-bit halves */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		/* low-word wrap-around means a carry into the high word */ \
		if (s_lo < a_lo) \
			s_hi++; \
		s_hi += a_hi; \
	} while (0)
2960
/* difference = minuend - subtrahend */
/* 64-bit subtract on split 32-bit halves.  Clamps the result to 0 when the
 * minuend is smaller than the subtrahend (counters should never go
 * backwards).  NOTE(review): if m_hi < s_hi while m_lo < s_lo, the unsigned
 * m_hi - s_hi wraps to a large value and the borrow branch is taken rather
 * than the clamp - presumably harmless for monotonic stats; verify. */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
2988
/* Update one 64-bit MAC statistic (field pair s_hi/s_lo of 'new'):
 * compute the delta against the last hardware snapshot in mac_stx[0],
 * store the new snapshot, and accumulate the delta into the running
 * total in mac_stx[1].  Expects 'new', 'pstats' and 'diff' to be in
 * scope at the expansion site. */
#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)
2998
/* Update one 64-bit NIG statistic: delta of 'new' against 'old'
 * accumulated into the matching estats field.  Expects 'new', 'old',
 * 'estats' and 'diff' to be in scope at the expansion site. */
#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)
3006
/* Widening accumulate: add a 32-bit value into a split hi/lo 64-bit sum.
   Wrap-around of the low word carries one into the high word. */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		if (s_lo < a) /* carry out of the low word */ \
			s_hi++; \
	} while (0)
3013
/* Accumulate a 32-bit MAC counter 'new->s' into the 64-bit running
 * total mac_stx[1].s_hi/_lo.  Expects 'new' and 'pstats' in scope. */
#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)
3020
/* Fold the delta of a little-endian TSTORM per-client counter into the
 * 64-bit queue statistic qstats->t, and snapshot the raw value into
 * old_tclient.  Expects 'tclient', 'old_tclient', 'qstats' and a u32
 * 'diff' in scope at the expansion site. */
#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3027
/* Same as UPDATE_EXTEND_TSTAT but for the USTORM per-client counters
 * ('uclient'/'old_uclient' expected in scope). */
#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3034
/* Same as UPDATE_EXTEND_TSTAT but for the XSTORM per-client counters
 * ('xclient'/'old_xclient' expected in scope). */
#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3041
/* minuend -= subtrahend, in place over split hi/lo words.
 * Saturates to 0 on underflow (inherits DIFF_64 semantics). */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)
3047
/* minuend[hi:lo] -= 32-bit subtrahend (saturating at 0 via SUB_64). */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)
3053
/* Subtract the delta of a little-endian USTORM per-client counter from
 * the 64-bit queue statistic qstats->t.  Unlike UPDATE_EXTEND_USTAT
 * this does NOT update old_uclient->s.  Expects 'uclient',
 * 'old_uclient', 'qstats' and a u32 'diff' in scope. */
#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3059
3060/*
3061 * General service functions
3062 */
3063
/* Collapse a {hi, lo} pair of consecutive u32 words (hi first) into a
 * long for reporting: the full 64-bit value on 64-bit kernels, only the
 * low 32 bits where long is 32-bit. */
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);	/* low word follows the high word */
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
3075
3076/*
3077 * Init service functions
3078 */
3079
/* Post a statistics-query ramrod to the storms, unless one is already
 * outstanding (bp->stats_pending).  The ramrod carries an incrementing
 * driver counter and a bitmap of the client IDs to collect for. */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		/* request stats for every active client */
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq,
			   so give back the one bnx2x_sp_post consumed */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003101
/* Reset all statistics state: per-port shmem stats address, NIG
 * baseline snapshot, per-queue storm snapshots and the driver's
 * aggregate counters.  Leaves the stats state machine DISABLED; on an
 * E1H multi-function PMF with a valid port stats address it kicks a
 * PMF event so port stats collection can start. */
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	/* take a baseline snapshot of the NIG counters so later updates
	   can be computed as deltas */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
3148
/* Kick the hardware statistics DMAE transfer.
 *
 * If a chain of DMAE commands was prepared (bp->executer_idx != 0),
 * program bp->stats_dmae as a "loader" command that copies the first
 * prepared command into the DMAE command memory and triggers the chain.
 * Otherwise, if only the single function-stats command exists, post it
 * directly.  Completion is signalled by *stats_comp == DMAE_COMP_VAL.
 * Skipped entirely on slow (emulation/FPGA) chip revisions. */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		/* source: the first prepared command in the sp area */
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		/* destination: the next DMAE command-memory slot */
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;	/* E1 command is one dword shorter */
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
3196
3197static int bnx2x_stats_comp(struct bnx2x *bp)
3198{
3199 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3200 int cnt = 10;
3201
3202 might_sleep();
3203 while (*stats_comp != DMAE_COMP_VAL) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003204 if (!cnt) {
3205 BNX2X_ERR("timeout waiting for stats finished\n");
3206 break;
3207 }
3208 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -07003209 msleep(1);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003210 }
3211 return 1;
3212}
3213
3214/*
3215 * Statistics service functions
3216 */
3217
/* On becoming PMF: pull the current port statistics from shmem into the
 * driver's host_port_stats buffer via two DMAE reads (the area is larger
 * than one maximum-length DMAE read), then wait for completion.  Only
 * valid on an E1H multi-function PMF with a port stats address. */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* common opcode: GRC -> PCI read */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	/* first chunk: maximum-length read, chained completion */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second chunk: remainder, completion written to stats_comp */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3272
/* Build the PMF's chain of statistics DMAE commands in the sp area:
 * writes of host port/function stats to MCP shmem, reads of the active
 * MAC's (BMAC or EMAC) counter blocks, and reads of the NIG counters.
 * All commands but the last complete into the chained GO registers; the
 * final one writes DMAE_COMP_VAL to stats_comp.  The chain is only
 * prepared here - bnx2x_hw_stats_post() actually triggers it.
 * Requires link up and PMF status. */
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP: write host stats out to shmem (PCI -> GRC) */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC: read the hardware counters (GRC -> PCI) */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* last command: completion is written to stats_comp in host mem */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
3480
/* Prepare the single DMAE command (bp->stats_dmae) that writes the host
 * function statistics out to the function's shmem area (bp->func_stx).
 * Used by non-PMF functions; the command is triggered later by
 * bnx2x_hw_stats_post(). */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
3516
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003517static void bnx2x_stats_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003518{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003519 if (bp->port.pmf)
3520 bnx2x_port_stats_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003521
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003522 else if (bp->func_stx)
3523 bnx2x_func_stats_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003524
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003525 bnx2x_hw_stats_post(bp);
3526 bnx2x_storm_stats_post(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003527}
3528
/* PMF handover: wait for any in-flight stats DMAE, pull the port stats
 * baseline from shmem, then start a normal collection cycle. */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
3535
/* Restart statistics collection: wait for any in-flight stats DMAE to
 * complete, then kick off a fresh cycle. */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003541
/* Fold the freshly DMAE'd BMAC counters into the host port statistics:
 * each UPDATE_STAT64 computes the delta against the previous snapshot
 * (mac_stx[0]) and accumulates it into the running totals (mac_stx[1]).
 * Also mirrors the pause-frame totals into bp->eth_stats. */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;	/* scratch used by the UPDATE_STAT64 expansions */

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	/* export pause-frame totals to the driver-wide stats */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
3592
/* Fold the freshly DMAE'd EMAC counters into the host port statistics.
 * EMAC counters are plain 32-bit values, so each UPDATE_EXTEND_STAT
 * widens them into the 64-bit running totals in mac_stx[1].  Pause-frame
 * totals (xon + xoff, both directions) are mirrored into bp->eth_stats. */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause frames received = xon + xoff received */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause frames sent = xon + xoff sent */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
3649
/* Process the results of a completed hardware stats DMAE: dispatch to
 * the BMAC or EMAC update routine, fold in the NIG counters (as deltas
 * against the previous snapshot), publish the updated MAC totals into
 * bp->eth_stats, and advance the host_port_stats sequence counters.
 * Returns 0 on success, -1 if no MAC type is active. */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;	/* scratch used by the UPDATE_STAT64_NIG expansions */
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	/* NIG counters: accumulate deltas against the old snapshot */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	/* publish the accumulated MAC totals into the driver stats */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	/* report a change in the firmware's NIG timer watermark */
	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
3699
/* Harvest the firmware ("storm") statistics that the chip DMAs into the
 * slowpath fw_stats buffer, fold the per-queue client counters into the
 * per-function totals and the driver's bnx2x_eth_stats mirror.
 *
 * Returns 0 on success, or a negative storm-specific code (-1/-2/-4 for
 * x/t/u-storm respectively) if the firmware has not yet published a
 * snapshot matching bp->stats_counter - in that case the caller must
 * treat the buffer contents as stale and retry later.
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	/* restart the per-function accumulators from scratch; the first two
	 * u32s (host_func_stats_start/end markers) are deliberately kept */
	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		/* scratch used by the UPDATE_EXTEND_*/SUB_EXTEND_* macros
		 * below (they reference 'diff' by name) */
		u32 diff;

		/* are storm stats valid? each storm stamps its snapshot with
		 * a counter; it must be exactly one behind bp->stats_counter */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		/* rx byte counts: 'valid' excludes, 'total' includes errors */
		qstats->total_bytes_received_hi =
		qstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
		qstats->total_bytes_received_lo =
		qstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		/* extend the 32-bit tstorm rx counters into 64-bit qstats */
		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		/* packets dropped for lack of buffers were still counted as
		 * received by tstorm - subtract them back out, and account
		 * them as no_buff discards instead */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		/* remember raw discard counters for the debug printout and
		 * for net_stats_update's rx_dropped sum */
		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		/* fold this queue's counters into the per-function totals */
		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	/* bad octets seen by the MAC are not in the storm counters */
	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* publish the function totals into the driver's eth_stats mirror
	 * (again skipping the two start/end marker words) */
	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* port-wide tstorm counters are only meaningful on the PMF */
	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	/* mark the snapshot consistent for readers of the shadow buffer */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
3890
3891static void bnx2x_net_stats_update(struct bnx2x *bp)
3892{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003893 struct bnx2x_eth_stats *estats = &bp->eth_stats;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003894 struct net_device_stats *nstats = &bp->dev->stats;
Eilon Greensteinde832a52009-02-12 08:36:33 +00003895 int i;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003896
3897 nstats->rx_packets =
3898 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3899 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3900 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3901
3902 nstats->tx_packets =
3903 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3904 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3905 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3906
Eilon Greensteinde832a52009-02-12 08:36:33 +00003907 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003908
Eliezer Tamir0e39e642008-02-28 11:54:03 -08003909 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003910
Eilon Greensteinde832a52009-02-12 08:36:33 +00003911 nstats->rx_dropped = estats->mac_discard;
3912 for_each_queue(bp, i)
3913 nstats->rx_dropped +=
3914 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3915
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003916 nstats->tx_dropped = 0;
3917
3918 nstats->multicast =
Eilon Greensteinde832a52009-02-12 08:36:33 +00003919 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003920
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003921 nstats->collisions =
Eilon Greensteinde832a52009-02-12 08:36:33 +00003922 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003923
3924 nstats->rx_length_errors =
Eilon Greensteinde832a52009-02-12 08:36:33 +00003925 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3926 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3927 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3928 bnx2x_hilo(&estats->brb_truncate_hi);
3929 nstats->rx_crc_errors =
3930 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3931 nstats->rx_frame_errors =
3932 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3933 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003934 nstats->rx_missed_errors = estats->xxoverflow_discard;
3935
3936 nstats->rx_errors = nstats->rx_length_errors +
3937 nstats->rx_over_errors +
3938 nstats->rx_crc_errors +
3939 nstats->rx_frame_errors +
Eliezer Tamir0e39e642008-02-28 11:54:03 -08003940 nstats->rx_fifo_errors +
3941 nstats->rx_missed_errors;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003942
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003943 nstats->tx_aborted_errors =
Eilon Greensteinde832a52009-02-12 08:36:33 +00003944 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3945 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3946 nstats->tx_carrier_errors =
3947 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003948 nstats->tx_fifo_errors = 0;
3949 nstats->tx_heartbeat_errors = 0;
3950 nstats->tx_window_errors = 0;
3951
3952 nstats->tx_errors = nstats->tx_aborted_errors +
Eilon Greensteinde832a52009-02-12 08:36:33 +00003953 nstats->tx_carrier_errors +
3954 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3955}
3956
3957static void bnx2x_drv_stats_update(struct bnx2x *bp)
3958{
3959 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3960 int i;
3961
3962 estats->driver_xoff = 0;
3963 estats->rx_err_discard_pkt = 0;
3964 estats->rx_skb_alloc_failed = 0;
3965 estats->hw_csum_err = 0;
3966 for_each_queue(bp, i) {
3967 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3968
3969 estats->driver_xoff += qstats->driver_xoff;
3970 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3971 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3972 estats->hw_csum_err += qstats->hw_csum_err;
3973 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003974}
3975
/* Periodic statistics refresh (ENABLED-state UPDATE action of the stats
 * state machine): consume the completed DMAE snapshot, recompute all
 * derived counters, optionally dump a debug summary, and kick off the
 * next hardware + storm statistics requests.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* previous DMAE transfer not complete yet - nothing to consume */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	/* MAC/NIG counters are only collected by the port master (PMF) */
	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* a non-zero return means the firmware snapshot was stale; panic
	 * only after 4 consecutive misses (stats_pending 0..3) */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* verbose per-tick dump, gated on the TIMER debug level */
	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
		       " tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
		       " rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
		       "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %lu no_buff_discard %lu "
			"mac_discard %u mac_filter_discard %u "
			"xxovrflow_discard %u brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	/* request the next snapshot from hardware and firmware */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004041
/* Build (but do not fire) the DMAE commands that flush the final port
 * and function statistics snapshots from host memory back into the
 * management firmware's shared-memory areas (port_stx / func_stx)
 * before statistics collection is stopped.  The commands are queued in
 * the slowpath dmae[] executer array starting at index 0; the caller
 * triggers them via bnx2x_hw_stats_post().
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* common opcode: PCI (host) -> GRC (shmem) copy; endianness swap
	 * depends on host byte order */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* if a function-stats command follows, chain to it via the
		 * GRC loader; otherwise complete straight to host memory */
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		/* last command in the chain: completion lands in host
		 * memory where bnx2x_stats_comp() polls for it */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
4105
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004106static void bnx2x_stats_stop(struct bnx2x *bp)
4107{
4108 int update = 0;
4109
4110 bnx2x_stats_comp(bp);
4111
4112 if (bp->port.pmf)
4113 update = (bnx2x_hw_stats_update(bp) == 0);
4114
4115 update |= (bnx2x_storm_stats_update(bp) == 0);
4116
4117 if (update) {
4118 bnx2x_net_stats_update(bp);
4119
4120 if (bp->port.pmf)
4121 bnx2x_port_stats_stop(bp);
4122
4123 bnx2x_hw_stats_post(bp);
4124 bnx2x_stats_comp(bp);
4125 }
4126}
4127
/* Intentionally empty action for state-machine transitions that require
 * no work (see bnx2x_stats_stm below).
 */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
4131
/* Statistics state machine: indexed by [current state][event], each
 * entry names the action to run and the state to move to.  Driven by
 * bnx2x_stats_handle(); rows are STATS_STATE_DISABLED/ENABLED, columns
 * are the STATS_EVENT_* values in declaration order.
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
4150
4151static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4152{
4153 enum bnx2x_stats_state state = bp->stats_state;
4154
4155 bnx2x_stats_stm[state][event].action(bp);
4156 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4157
4158 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4159 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4160 state, event, bp->stats_state);
4161}
4162
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004163static void bnx2x_timer(unsigned long data)
4164{
4165 struct bnx2x *bp = (struct bnx2x *) data;
4166
4167 if (!netif_running(bp->dev))
4168 return;
4169
4170 if (atomic_read(&bp->intr_sem) != 0)
Eliezer Tamirf1410642008-02-28 11:51:50 -08004171 goto timer_restart;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004172
4173 if (poll) {
4174 struct bnx2x_fastpath *fp = &bp->fp[0];
4175 int rc;
4176
Eilon Greenstein7961f792009-03-02 07:59:31 +00004177 bnx2x_tx_int(fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004178 rc = bnx2x_rx_int(fp, 1000);
4179 }
4180
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004181 if (!BP_NOMCP(bp)) {
4182 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004183 u32 drv_pulse;
4184 u32 mcp_pulse;
4185
4186 ++bp->fw_drv_pulse_wr_seq;
4187 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4188 /* TBD - add SYSTEM_TIME */
4189 drv_pulse = bp->fw_drv_pulse_wr_seq;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004190 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004191
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004192 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004193 MCP_PULSE_SEQ_MASK);
4194 /* The delta between driver pulse and mcp response
4195 * should be 1 (before mcp response) or 0 (after mcp response)
4196 */
4197 if ((drv_pulse != mcp_pulse) &&
4198 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4199 /* someone lost a heartbeat... */
4200 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4201 drv_pulse, mcp_pulse);
4202 }
4203 }
4204
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004205 if ((bp->state == BNX2X_STATE_OPEN) ||
4206 (bp->state == BNX2X_STATE_DISABLED))
4207 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004208
Eliezer Tamirf1410642008-02-28 11:51:50 -08004209timer_restart:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004210 mod_timer(&bp->timer, jiffies + bp->current_interval);
4211}
4212
4213/* end of Statistics */
4214
4215/* nic init */
4216
4217/*
4218 * nic init service functions
4219 */
4220
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004221static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004222{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004223 int port = BP_PORT(bp);
4224
4225 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4226 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
Yitchak Gertner35302982008-08-13 15:53:12 -07004227 sizeof(struct ustorm_status_block)/4);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004228 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4229 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
Yitchak Gertner35302982008-08-13 15:53:12 -07004230 sizeof(struct cstorm_status_block)/4);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004231}
4232
/* Program a non-default status block into the chip: publish the host
 * DMA address of the USTORM and CSTORM sections to internal memory,
 * bind the block to this function, mask (HC_DISABLE) every index, and
 * finally ACK/enable the block in the IGU.
 *
 * @sb:      host virtual address of the status block
 * @mapping: its DMA (bus) address as seen by the chip
 * @sb_id:   status block id within the port
 */
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	/* low/high halves of the 64-bit section address */
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	/* start with all host-coalescing indices disabled */
	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* enable interrupts for this status block in the IGU */
	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4277
4278static void bnx2x_zero_def_sb(struct bnx2x *bp)
4279{
4280 int func = BP_FUNC(bp);
4281
4282 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4283 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4284 sizeof(struct ustorm_def_status_block)/4);
4285 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4286 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4287 sizeof(struct cstorm_def_status_block)/4);
4288 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4289 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4290 sizeof(struct xstorm_def_status_block)/4);
4291 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4292 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4293 sizeof(struct tstorm_def_status_block)/4);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004294}
4295
4296static void bnx2x_init_def_sb(struct bnx2x *bp,
4297 struct host_def_status_block *def_sb,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004298 dma_addr_t mapping, int sb_id)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004299{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004300 int port = BP_PORT(bp);
4301 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004302 int index, val, reg_offset;
4303 u64 section;
4304
4305 /* ATTN */
4306 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4307 atten_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004308 def_sb->atten_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004309
Eliezer Tamir49d66772008-02-28 11:53:13 -08004310 bp->attn_state = 0;
4311
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004312 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4313 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4314
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004315 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004316 bp->attn_group[index].sig[0] = REG_RD(bp,
4317 reg_offset + 0x10*index);
4318 bp->attn_group[index].sig[1] = REG_RD(bp,
4319 reg_offset + 0x4 + 0x10*index);
4320 bp->attn_group[index].sig[2] = REG_RD(bp,
4321 reg_offset + 0x8 + 0x10*index);
4322 bp->attn_group[index].sig[3] = REG_RD(bp,
4323 reg_offset + 0xc + 0x10*index);
4324 }
4325
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004326 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4327 HC_REG_ATTN_MSG0_ADDR_L);
4328
4329 REG_WR(bp, reg_offset, U64_LO(section));
4330 REG_WR(bp, reg_offset + 4, U64_HI(section));
4331
4332 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4333
4334 val = REG_RD(bp, reg_offset);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004335 val |= sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004336 REG_WR(bp, reg_offset, val);
4337
4338 /* USTORM */
4339 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4340 u_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004341 def_sb->u_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004342
4343 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004344 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004345 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004346 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004347 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07004348 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004349 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004350
4351 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4352 REG_WR16(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004353 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004354
4355 /* CSTORM */
4356 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4357 c_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004358 def_sb->c_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004359
4360 REG_WR(bp, BAR_CSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004361 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004362 REG_WR(bp, BAR_CSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004363 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004364 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07004365 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004366 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004367
4368 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4369 REG_WR16(bp, BAR_CSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004370 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004371
4372 /* TSTORM */
4373 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4374 t_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004375 def_sb->t_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004376
4377 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004378 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004379 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004380 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004381 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07004382 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004383 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004384
4385 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4386 REG_WR16(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004387 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004388
4389 /* XSTORM */
4390 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4391 x_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004392 def_sb->x_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004393
4394 REG_WR(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004395 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004396 REG_WR(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004397 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004398 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07004399 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004400 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004401
4402 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4403 REG_WR16(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004404 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004405
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004406 bp->stats_pending = 0;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004407 bp->set_mac_pending = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004408
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004409 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004410}
4411
/* Program the host-coalescing (HC) parameters for every queue's status
 * block: a timeout for the Rx CQ index (USTORM) and for the Tx CQ index
 * (CSTORM), plus a per-index disable word.  A zero tick value disables
 * coalescing for that index (the "HC_DISABLE" word is written as 1).
 */
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			/* /12 converts ticks to HW units - presumably a
			 * 12us HC tick; TODO confirm against microcode doc */
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}
4441
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004442static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4443 struct bnx2x_fastpath *fp, int last)
4444{
4445 int i;
4446
4447 for (i = 0; i < last; i++) {
4448 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4449 struct sk_buff *skb = rx_buf->skb;
4450
4451 if (skb == NULL) {
4452 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4453 continue;
4454 }
4455
4456 if (fp->tpa_state[i] == BNX2X_TPA_START)
4457 pci_unmap_single(bp->pdev,
4458 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein356e2382009-02-12 08:38:32 +00004459 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004460
4461 dev_kfree_skb(skb);
4462 rx_buf->skb = NULL;
4463 }
4464}
4465
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004466static void bnx2x_init_rx_rings(struct bnx2x *bp)
4467{
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004468 int func = BP_FUNC(bp);
Eilon Greenstein32626232008-08-13 15:51:07 -07004469 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4470 ETH_MAX_AGGREGATION_QUEUES_E1H;
4471 u16 ring_prod, cqe_ring_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004472 int i, j;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004473
Eilon Greenstein87942b42009-02-12 08:36:49 +00004474 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
Eilon Greenstein0f008462009-02-12 08:36:18 +00004475 DP(NETIF_MSG_IFUP,
4476 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004477
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004478 if (bp->flags & TPA_ENABLE_FLAG) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004479
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004480 for_each_rx_queue(bp, j) {
Eilon Greenstein32626232008-08-13 15:51:07 -07004481 struct bnx2x_fastpath *fp = &bp->fp[j];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004482
Eilon Greenstein32626232008-08-13 15:51:07 -07004483 for (i = 0; i < max_agg_queues; i++) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004484 fp->tpa_pool[i].skb =
4485 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4486 if (!fp->tpa_pool[i].skb) {
4487 BNX2X_ERR("Failed to allocate TPA "
4488 "skb pool for queue[%d] - "
4489 "disabling TPA on this "
4490 "queue!\n", j);
4491 bnx2x_free_tpa_pool(bp, fp, i);
4492 fp->disable_tpa = 1;
4493 break;
4494 }
4495 pci_unmap_addr_set((struct sw_rx_bd *)
4496 &bp->fp->tpa_pool[i],
4497 mapping, 0);
4498 fp->tpa_state[i] = BNX2X_TPA_STOP;
4499 }
4500 }
4501 }
4502
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004503 for_each_rx_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004504 struct bnx2x_fastpath *fp = &bp->fp[j];
4505
4506 fp->rx_bd_cons = 0;
4507 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004508 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004509
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004510 /* "next page" elements initialization */
4511 /* SGE ring */
4512 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4513 struct eth_rx_sge *sge;
4514
4515 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4516 sge->addr_hi =
4517 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4518 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4519 sge->addr_lo =
4520 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4521 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4522 }
4523
4524 bnx2x_init_sge_ring_bit_mask(fp);
4525
4526 /* RX BD ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004527 for (i = 1; i <= NUM_RX_RINGS; i++) {
4528 struct eth_rx_bd *rx_bd;
4529
4530 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4531 rx_bd->addr_hi =
4532 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004533 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004534 rx_bd->addr_lo =
4535 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004536 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004537 }
4538
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004539 /* CQ ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004540 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4541 struct eth_rx_cqe_next_page *nextpg;
4542
4543 nextpg = (struct eth_rx_cqe_next_page *)
4544 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4545 nextpg->addr_hi =
4546 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004547 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004548 nextpg->addr_lo =
4549 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004550 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004551 }
4552
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004553 /* Allocate SGEs and initialize the ring elements */
4554 for (i = 0, ring_prod = 0;
4555 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004556
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004557 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4558 BNX2X_ERR("was only able to allocate "
4559 "%d rx sges\n", i);
4560 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4561 /* Cleanup already allocated elements */
4562 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
Eilon Greenstein32626232008-08-13 15:51:07 -07004563 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004564 fp->disable_tpa = 1;
4565 ring_prod = 0;
4566 break;
4567 }
4568 ring_prod = NEXT_SGE_IDX(ring_prod);
4569 }
4570 fp->rx_sge_prod = ring_prod;
4571
4572 /* Allocate BDs and initialize BD ring */
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004573 fp->rx_comp_cons = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004574 cqe_ring_prod = ring_prod = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004575 for (i = 0; i < bp->rx_ring_size; i++) {
4576 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4577 BNX2X_ERR("was only able to allocate "
Eilon Greensteinde832a52009-02-12 08:36:33 +00004578 "%d rx skbs on queue[%d]\n", i, j);
4579 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004580 break;
4581 }
4582 ring_prod = NEXT_RX_IDX(ring_prod);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004583 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
Ilpo Järvinen53e5e962008-07-25 21:40:45 -07004584 WARN_ON(ring_prod <= i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004585 }
4586
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004587 fp->rx_bd_prod = ring_prod;
4588 /* must not have more available CQEs than BDs */
4589 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4590 cqe_ring_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004591 fp->rx_pkt = fp->rx_calls = 0;
4592
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004593 /* Warning!
4594 * this will generate an interrupt (to the TSTORM)
4595 * must only be done after chip is initialized
4596 */
4597 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4598 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004599 if (j != 0)
4600 continue;
4601
4602 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004603 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004604 U64_LO(fp->rx_comp_mapping));
4605 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004606 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004607 U64_HI(fp->rx_comp_mapping));
4608 }
4609}
4610
4611static void bnx2x_init_tx_ring(struct bnx2x *bp)
4612{
4613 int i, j;
4614
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004615 for_each_tx_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004616 struct bnx2x_fastpath *fp = &bp->fp[j];
4617
4618 for (i = 1; i <= NUM_TX_RINGS; i++) {
4619 struct eth_tx_bd *tx_bd =
4620 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4621
4622 tx_bd->addr_hi =
4623 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004624 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004625 tx_bd->addr_lo =
4626 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004627 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004628 }
4629
4630 fp->tx_pkt_prod = 0;
4631 fp->tx_pkt_cons = 0;
4632 fp->tx_bd_prod = 0;
4633 fp->tx_bd_cons = 0;
4634 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4635 fp->tx_pkt = 0;
4636 }
4637}
4638
/* Initialize the slow-path queue (SPQ): software producer state plus the
 * XSTORM fast-memory locations that hold the SPQ page base address and
 * the initial producer index.
 */
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	/* all SPQ slots start out free; producer at the first BD */
	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	/* tell the XSTORM where the SPQ page lives (low then high dword) */
	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	/* publish the initial producer index */
	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
4660
/* Fill in the per-connection Ethernet context for every queue: ring base
 * addresses, status-block/client bindings and statistics ids for the
 * USTORM (Rx), XSTORM (Tx) and CSTORM sections, plus the CDU validation
 * words for the aggregation contexts.
 */
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = fp->sb_id;

		/* USTORM (Rx) section: client/status-block binding,
		 * buffer sizes and Rx BD ring base */
		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			/* TPA queues additionally get the SGE ring; the SGE
			 * buffer size is clamped to the 16-bit field */
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		/* CDU validation word for the USTORM aggregation context */
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		/* XSTORM (Tx) section: Tx BD ring and doorbell data bases */
		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		/* CSTORM section: Tx CQ index in the status block */
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		/* CDU validation word for the XSTORM aggregation context */
		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}
4726
4727static void bnx2x_init_ind_table(struct bnx2x *bp)
4728{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004729 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004730 int i;
4731
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004732 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004733 return;
4734
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004735 DP(NETIF_MSG_IFUP,
4736 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004737 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004738 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004739 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Eilon Greenstein0626b892009-02-12 08:38:14 +00004740 bp->fp->cl_id + (i % bp->num_rx_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004741}
4742
/* Build one tstorm_eth_client_config and write it (two dwords) into the
 * TSTORM client-config slot of every queue's client.  Configures MTU,
 * statistics/E1HOV-removal flags, optional HW VLAN stripping and - when
 * TPA is enabled - the per-packet SGE budget.
 */
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	/* NOTE: "STATSITICS" is the spelling of the firmware header macro */
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	/* strip VLAN tags in HW only when Rx is up and a vlan group with
	 * HW Rx acceleration is registered */
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		/* SGE pages needed for an MTU-sized frame, rounded up to a
		 * whole PAGES_PER_SGE group then converted to SGE count */
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	/* same config for every client, except the statistics counter id */
	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
4787
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004788static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4789{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004790 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004791 int mode = bp->rx_mode;
4792 int mask = (1 << BP_L_ID(bp));
4793 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004794 int i;
4795
Eilon Greenstein3196a882008-08-13 15:58:49 -07004796 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004797
4798 switch (mode) {
4799 case BNX2X_RX_MODE_NONE: /* no Rx */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004800 tstorm_mac_filter.ucast_drop_all = mask;
4801 tstorm_mac_filter.mcast_drop_all = mask;
4802 tstorm_mac_filter.bcast_drop_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004803 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004804
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004805 case BNX2X_RX_MODE_NORMAL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004806 tstorm_mac_filter.bcast_accept_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004807 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004808
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004809 case BNX2X_RX_MODE_ALLMULTI:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004810 tstorm_mac_filter.mcast_accept_all = mask;
4811 tstorm_mac_filter.bcast_accept_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004812 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004813
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004814 case BNX2X_RX_MODE_PROMISC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004815 tstorm_mac_filter.ucast_accept_all = mask;
4816 tstorm_mac_filter.mcast_accept_all = mask;
4817 tstorm_mac_filter.bcast_accept_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004818 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004819
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004820 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004821 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4822 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004823 }
4824
4825 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4826 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004827 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004828 ((u32 *)&tstorm_mac_filter)[i]);
4829
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004830/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004831 ((u32 *)&tstorm_mac_filter)[i]); */
4832 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004833
Eliezer Tamir49d66772008-02-28 11:53:13 -08004834 if (mode != BNX2X_RX_MODE_NONE)
4835 bnx2x_set_client_config(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004836}
4837
Eilon Greenstein471de712008-08-13 15:49:35 -07004838static void bnx2x_init_internal_common(struct bnx2x *bp)
4839{
4840 int i;
4841
Yitchak Gertner3cdf1db2008-08-25 15:24:21 -07004842 if (bp->flags & TPA_ENABLE_FLAG) {
4843 struct tstorm_eth_tpa_exist tpa = {0};
4844
4845 tpa.tpa_exist = 1;
4846
4847 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4848 ((u32 *)&tpa)[0]);
4849 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4850 ((u32 *)&tpa)[1]);
4851 }
4852
Eilon Greenstein471de712008-08-13 15:49:35 -07004853 /* Zero this manually as its initialization is
4854 currently missing in the initTool */
4855 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4856 REG_WR(bp, BAR_USTRORM_INTMEM +
4857 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4858}
4859
/* Per-port internal memory init: write BNX2X_BTR into each storm's
 * per-port HC_BTR location.  NOTE(review): BTR semantics are defined by
 * the microcode interface and are not visible here - confirm against the
 * firmware HSI before changing.
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
4869
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00004870/* Calculates the sum of vn_min_rates.
4871 It's needed for further normalizing of the min_rates.
4872 Returns:
4873 sum of vn_min_rates.
4874 or
4875 0 - if all the min_rates are 0.
4876 In the later case fainess algorithm should be deactivated.
4877 If not all min_rates are zero then those that are zeroes will be set to 1.
4878 */
4879static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4880{
4881 int all_zero = 1;
4882 int port = BP_PORT(bp);
4883 int vn;
4884
4885 bp->vn_weight_sum = 0;
4886 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4887 int func = 2*vn + port;
4888 u32 vn_cfg =
4889 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4890 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4891 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4892
4893 /* Skip hidden vns */
4894 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4895 continue;
4896
4897 /* If min rate is zero - set it to 1 */
4898 if (!vn_min_rate)
4899 vn_min_rate = DEF_MIN_RATE;
4900 else
4901 all_zero = 0;
4902
4903 bp->vn_weight_sum += vn_min_rate;
4904 }
4905
4906 /* ... only if all min rates are zeros - disable fairness */
4907 if (all_zero)
4908 bp->vn_weight_sum = 0;
4909}
4910
Eilon Greenstein471de712008-08-13 15:49:35 -07004911static void bnx2x_init_internal_func(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004912{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004913 struct tstorm_eth_function_common_config tstorm_config = {0};
4914 struct stats_indication_flags stats_flags = {0};
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004915 int port = BP_PORT(bp);
4916 int func = BP_FUNC(bp);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004917 int i, j;
4918 u32 offset;
Eilon Greenstein471de712008-08-13 15:49:35 -07004919 u16 max_agg_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004920
4921 if (is_multi(bp)) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004922 tstorm_config.config_flags = MULTI_FLAGS(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004923 tstorm_config.rss_result_mask = MULTI_MASK;
4924 }
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08004925 if (IS_E1HMF(bp))
4926 tstorm_config.config_flags |=
4927 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004928
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004929 tstorm_config.leading_client_id = BP_L_ID(bp);
4930
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004931 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004932 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004933 (*(u32 *)&tstorm_config));
4934
Eliezer Tamirc14423f2008-02-28 11:49:42 -08004935 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004936 bnx2x_set_storm_rx_mode(bp);
4937
Eilon Greensteinde832a52009-02-12 08:36:33 +00004938 for_each_queue(bp, i) {
4939 u8 cl_id = bp->fp[i].cl_id;
4940
4941 /* reset xstorm per client statistics */
4942 offset = BAR_XSTRORM_INTMEM +
4943 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4944 for (j = 0;
4945 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4946 REG_WR(bp, offset + j*4, 0);
4947
4948 /* reset tstorm per client statistics */
4949 offset = BAR_TSTRORM_INTMEM +
4950 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4951 for (j = 0;
4952 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4953 REG_WR(bp, offset + j*4, 0);
4954
4955 /* reset ustorm per client statistics */
4956 offset = BAR_USTRORM_INTMEM +
4957 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4958 for (j = 0;
4959 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4960 REG_WR(bp, offset + j*4, 0);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004961 }
4962
4963 /* Init statistics related context */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004964 stats_flags.collect_eth = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004965
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004966 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004967 ((u32 *)&stats_flags)[0]);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004968 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004969 ((u32 *)&stats_flags)[1]);
4970
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004971 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004972 ((u32 *)&stats_flags)[0]);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004973 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004974 ((u32 *)&stats_flags)[1]);
4975
Eilon Greensteinde832a52009-02-12 08:36:33 +00004976 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4977 ((u32 *)&stats_flags)[0]);
4978 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4979 ((u32 *)&stats_flags)[1]);
4980
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004981 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004982 ((u32 *)&stats_flags)[0]);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004983 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004984 ((u32 *)&stats_flags)[1]);
4985
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004986 REG_WR(bp, BAR_XSTRORM_INTMEM +
4987 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4988 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4989 REG_WR(bp, BAR_XSTRORM_INTMEM +
4990 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4991 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4992
4993 REG_WR(bp, BAR_TSTRORM_INTMEM +
4994 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4995 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4996 REG_WR(bp, BAR_TSTRORM_INTMEM +
4997 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4998 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004999
Eilon Greensteinde832a52009-02-12 08:36:33 +00005000 REG_WR(bp, BAR_USTRORM_INTMEM +
5001 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5002 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5003 REG_WR(bp, BAR_USTRORM_INTMEM +
5004 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5005 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5006
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005007 if (CHIP_IS_E1H(bp)) {
5008 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5009 IS_E1HMF(bp));
5010 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5011 IS_E1HMF(bp));
5012 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5013 IS_E1HMF(bp));
5014 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5015 IS_E1HMF(bp));
5016
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005017 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5018 bp->e1hov);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005019 }
5020
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08005021 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5022 max_agg_size =
5023 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5024 SGE_PAGE_SIZE * PAGES_PER_SGE),
5025 (u32)0xffff);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005026 for_each_rx_queue(bp, i) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005027 struct bnx2x_fastpath *fp = &bp->fp[i];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005028
5029 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein0626b892009-02-12 08:38:14 +00005030 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005031 U64_LO(fp->rx_comp_mapping));
5032 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein0626b892009-02-12 08:38:14 +00005033 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005034 U64_HI(fp->rx_comp_mapping));
5035
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005036 REG_WR16(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein0626b892009-02-12 08:38:14 +00005037 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005038 max_agg_size);
5039 }
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00005040
Eilon Greenstein1c063282009-02-12 08:36:43 +00005041 /* dropless flow control */
5042 if (CHIP_IS_E1H(bp)) {
5043 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5044
5045 rx_pause.bd_thr_low = 250;
5046 rx_pause.cqe_thr_low = 250;
5047 rx_pause.cos = 1;
5048 rx_pause.sge_thr_low = 0;
5049 rx_pause.bd_thr_high = 350;
5050 rx_pause.cqe_thr_high = 350;
5051 rx_pause.sge_thr_high = 0;
5052
5053 for_each_rx_queue(bp, i) {
5054 struct bnx2x_fastpath *fp = &bp->fp[i];
5055
5056 if (!fp->disable_tpa) {
5057 rx_pause.sge_thr_low = 150;
5058 rx_pause.sge_thr_high = 250;
5059 }
5060
5061
5062 offset = BAR_USTRORM_INTMEM +
5063 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5064 fp->cl_id);
5065 for (j = 0;
5066 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5067 j++)
5068 REG_WR(bp, offset + j*4,
5069 ((u32 *)&rx_pause)[j]);
5070 }
5071 }
5072
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00005073 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5074
5075 /* Init rate shaping and fairness contexts */
5076 if (IS_E1HMF(bp)) {
5077 int vn;
5078
5079 /* During init there is no active link
5080 Until link is up, set link rate to 10Gbps */
5081 bp->link_vars.line_speed = SPEED_10000;
5082 bnx2x_init_port_minmax(bp);
5083
5084 bnx2x_calc_vn_weight_sum(bp);
5085
5086 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5087 bnx2x_init_vn_minmax(bp, 2*vn + port);
5088
5089 /* Enable rate shaping and fairness */
5090 bp->cmng.flags.cmng_enables =
5091 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5092 if (bp->vn_weight_sum)
5093 bp->cmng.flags.cmng_enables |=
5094 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5095 else
5096 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5097 " fairness will be disabled\n");
5098 } else {
5099 /* rate shaping and fairness are disabled */
5100 DP(NETIF_MSG_IFUP,
5101 "single function mode minmax will be disabled\n");
5102 }
5103
5104
5105 /* Store it to internal memory */
5106 if (bp->port.pmf)
5107 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5108 REG_WR(bp, BAR_XSTRORM_INTMEM +
5109 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5110 ((u32 *)(&bp->cmng))[i]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005111}
5112
Eilon Greenstein471de712008-08-13 15:49:35 -07005113static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5114{
5115 switch (load_code) {
5116 case FW_MSG_CODE_DRV_LOAD_COMMON:
5117 bnx2x_init_internal_common(bp);
5118 /* no break */
5119
5120 case FW_MSG_CODE_DRV_LOAD_PORT:
5121 bnx2x_init_internal_port(bp);
5122 /* no break */
5123
5124 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5125 bnx2x_init_internal_func(bp);
5126 break;
5127
5128 default:
5129 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5130 break;
5131 }
5132}
5133
/* Top-level NIC software/firmware init, called on every device load.
 * Sets up each fastpath queue's identity and status block, the default
 * status block, the RX/TX/slowpath rings and contexts, the internal
 * STORM memories (per @load_code), the indirection table and statistics,
 * and finally enables interrupts.
 * @load_code: MCP load response, forwarded to bnx2x_init_internal().
 */
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		/* client id and status-block id are derived from the
		   function's logical id plus the queue index */
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);
}
5179
5180/* end of nic init */
5181
5182/*
5183 * gzip service functions
5184 */
5185
5186static int bnx2x_gunzip_init(struct bnx2x *bp)
5187{
5188 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5189 &bp->gunzip_mapping);
5190 if (bp->gunzip_buf == NULL)
5191 goto gunzip_nomem1;
5192
5193 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5194 if (bp->strm == NULL)
5195 goto gunzip_nomem2;
5196
5197 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5198 GFP_KERNEL);
5199 if (bp->strm->workspace == NULL)
5200 goto gunzip_nomem3;
5201
5202 return 0;
5203
5204gunzip_nomem3:
5205 kfree(bp->strm);
5206 bp->strm = NULL;
5207
5208gunzip_nomem2:
5209 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5210 bp->gunzip_mapping);
5211 bp->gunzip_buf = NULL;
5212
5213gunzip_nomem1:
5214 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005215 " un-compression\n", bp->dev->name);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005216 return -ENOMEM;
5217}
5218
5219static void bnx2x_gunzip_end(struct bnx2x *bp)
5220{
5221 kfree(bp->strm->workspace);
5222
5223 kfree(bp->strm);
5224 bp->strm = NULL;
5225
5226 if (bp->gunzip_buf) {
5227 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5228 bp->gunzip_mapping);
5229 bp->gunzip_buf = NULL;
5230 }
5231}
5232
5233static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5234{
5235 int n, rc;
5236
5237 /* check gzip header */
5238 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5239 return -EINVAL;
5240
5241 n = 10;
5242
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005243#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005244
5245 if (zbuf[3] & FNAME)
5246 while ((zbuf[n++] != 0) && (n < len));
5247
5248 bp->strm->next_in = zbuf + n;
5249 bp->strm->avail_in = len - n;
5250 bp->strm->next_out = bp->gunzip_buf;
5251 bp->strm->avail_out = FW_BUF_SIZE;
5252
5253 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5254 if (rc != Z_OK)
5255 return rc;
5256
5257 rc = zlib_inflate(bp->strm, Z_FINISH);
5258 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5259 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5260 bp->dev->name, bp->strm->msg);
5261
5262 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5263 if (bp->gunzip_outlen & 0x3)
5264 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5265 " gunzip_outlen (%d) not aligned\n",
5266 bp->dev->name, bp->gunzip_outlen);
5267 bp->gunzip_outlen >>= 2;
5268
5269 zlib_inflateEnd(bp->strm);
5270
5271 if (rc == Z_STREAM_END)
5272 return 0;
5273
5274 return rc;
5275}
5276
5277/* nic load/unload */
5278
5279/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005280 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005281 */
5282
5283/* send a NIG loopback debug packet */
5284static void bnx2x_lb_pckt(struct bnx2x *bp)
5285{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005286 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005287
5288 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005289 wb_write[0] = 0x55555555;
5290 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005291 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005292 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005293
5294 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005295 wb_write[0] = 0x09000000;
5296 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005297 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005298 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005299}
5300
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 *
 * Part 1 pushes one loopback packet with parser credits at zero and
 * checks that the NIG and PRS counters account for it; part 2 queues
 * ten packets, releases one credit, and verifies the counters drain.
 * Returns 0 on success or a distinct negative value per failed stage.
 * NOTE(review): the timeout factors for FPGA/emulation appear to scale
 * for slower platforms - confirm against the chip init spec.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* slow platforms (FPGA/emulator) need proportionally longer waits */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		/* counter is read through the DMAE into the slowpath buffer */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
5452
/* Unmask the attention (parity/error) interrupts of the HW blocks by
 * zeroing their interrupt mask registers.  The commented-out SEM/MISC
 * entries are kept deliberately masked; PBF and PXP2 keep selected bits
 * masked via non-zero values.
 * NOTE(review): the 0x580000/0x480000 PXP2 masks differ only for FPGA -
 * presumably masking bits the FPGA model raises spuriously; confirm
 * against the register spec.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
5491
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005492
/* Put the chip's common (non per-port) blocks into reset by clearing
 * bits in the two MISC reset registers.
 * NOTE(review): 0xd3ffff7f / 0x1403 select which blocks are reset -
 * presumably leaving management-owned blocks untouched; confirm against
 * the MISC_REGISTERS_RESET_REG_* bit definitions.
 */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
5500
/* One-time (per chip, not per port/function) HW bring-up: resets the
 * common blocks, then initializes them in dependency order - MISC, PXP,
 * DMAE, the CM/SDM/SEM storm processors and their internal memories,
 * QM, DQ, BRB, PRS, CDU, CFC, HC, AEU, NIG - runs the internal memory
 * self test on first power-up, applies external-PHY specific setup,
 * unmasks block attentions and initializes the common PHY via the MCP.
 * Returns 0 on success or -EBUSY if a block fails to report init done.
 * The order of register writes and the sleeps between them are part of
 * the HW init contract - do not reorder.
 */
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	/* take all common blocks out of reset */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	/* pulse the LCPLL control register */
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	/* configure PXP2 byte-swapping for big-endian hosts */
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do it's magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	/* zero the storm internal memories; on E1H the fill is done in
	   two halves per storm */
	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	/* program the searcher RSS keys (placeholder pattern) */
	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client crdit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	/* external-PHY specific common setup */
	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		bp->port.need_hw_lock = 1;
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005787
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005788static int bnx2x_init_port(struct bnx2x *bp)
5789{
5790 int port = BP_PORT(bp);
Eilon Greenstein1c063282009-02-12 08:36:43 +00005791 u32 low, high;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005792 u32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005793
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005794 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5795
5796 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005797
5798 /* Port PXP comes here */
5799 /* Port PXP2 comes here */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005800#ifdef BCM_ISCSI
5801 /* Port0 1
5802 * Port1 385 */
5803 i++;
5804 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5805 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5806 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5807 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5808
5809 /* Port0 2
5810 * Port1 386 */
5811 i++;
5812 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5813 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5814 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5815 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5816
5817 /* Port0 3
5818 * Port1 387 */
5819 i++;
5820 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5821 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5822 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5823 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5824#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005825 /* Port CMs come here */
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005826 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5827 (port ? XCM_PORT1_END : XCM_PORT0_END));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005828
5829 /* Port QM comes here */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005830#ifdef BCM_ISCSI
5831 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5832 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5833
5834 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5835 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5836#endif
5837 /* Port DQ comes here */
Eilon Greenstein1c063282009-02-12 08:36:43 +00005838
5839 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5840 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5841 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5842 /* no pause for emulation and FPGA */
5843 low = 0;
5844 high = 513;
5845 } else {
5846 if (IS_E1HMF(bp))
5847 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5848 else if (bp->dev->mtu > 4096) {
5849 if (bp->flags & ONE_PORT_FLAG)
5850 low = 160;
5851 else {
5852 val = bp->dev->mtu;
5853 /* (24*1024 + val*4)/256 */
5854 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5855 }
5856 } else
5857 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5858 high = low + 56; /* 14*1024/256 */
5859 }
5860 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5861 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5862
5863
Eilon Greensteinad8d3942008-06-23 20:29:02 -07005864 /* Port PRS comes here */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005865 /* Port TSDM comes here */
5866 /* Port CSDM comes here */
5867 /* Port USDM comes here */
5868 /* Port XSDM comes here */
Eilon Greenstein356e2382009-02-12 08:38:32 +00005869
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005870 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5871 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5872 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5873 port ? USEM_PORT1_END : USEM_PORT0_END);
5874 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5875 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5876 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5877 port ? XSEM_PORT1_END : XSEM_PORT0_END);
Eilon Greenstein356e2382009-02-12 08:38:32 +00005878
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005879 /* Port UPB comes here */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005880 /* Port XPB comes here */
5881
5882 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5883 port ? PBF_PORT1_END : PBF_PORT0_END);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005884
5885 /* configure PBF to work without PAUSE mtu 9000 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005886 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005887
5888 /* update threshold */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005889 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005890 /* update init credit */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005891 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005892
5893 /* probe changes */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005894 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005895 msleep(5);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005896 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005897
5898#ifdef BCM_ISCSI
5899 /* tell the searcher where the T2 table is */
5900 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5901
5902 wb_write[0] = U64_LO(bp->t2_mapping);
5903 wb_write[1] = U64_HI(bp->t2_mapping);
5904 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5905 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5906 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5907 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5908
5909 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5910 /* Port SRCH comes here */
5911#endif
5912 /* Port CDU comes here */
5913 /* Port CFC comes here */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005914
5915 if (CHIP_IS_E1(bp)) {
5916 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5917 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5918 }
5919 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5920 port ? HC_PORT1_END : HC_PORT0_END);
5921
5922 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005923 MISC_AEU_PORT0_START,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005924 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5925 /* init aeu_mask_attn_func_0/1:
5926 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5927 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5928 * bits 4-7 are used for "per vn group attention" */
5929 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5930 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5931
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005932 /* Port PXPCS comes here */
5933 /* Port EMAC0 comes here */
5934 /* Port EMAC1 comes here */
5935 /* Port DBU comes here */
5936 /* Port DBG comes here */
Eilon Greenstein356e2382009-02-12 08:38:32 +00005937
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005938 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5939 port ? NIG_PORT1_END : NIG_PORT0_END);
5940
5941 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5942
5943 if (CHIP_IS_E1H(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005944 /* 0x2 disable e1hov, 0x1 enable */
5945 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5946 (IS_E1HMF(bp) ? 0x1 : 0x2));
5947
Eilon Greenstein1c063282009-02-12 08:36:43 +00005948 /* support pause requests from USDM, TSDM and BRB */
5949 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5950
5951 {
5952 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5953 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5954 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5955 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005956 }
5957
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005958 /* Port MCP comes here */
5959 /* Port DMAE comes here */
5960
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00005961 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
Eilon Greenstein589abe32009-02-12 08:36:55 +00005962 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5963 {
5964 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5965
5966 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5967 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5968
5969 /* The GPIO should be swapped if the swap register is
5970 set and active */
5971 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5972 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5973
5974 /* Select function upon port-swap configuration */
5975 if (port == 0) {
5976 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5977 aeu_gpio_mask = (swap_val && swap_override) ?
5978 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5979 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5980 } else {
5981 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5982 aeu_gpio_mask = (swap_val && swap_override) ?
5983 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5984 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5985 }
5986 val = REG_RD(bp, offset);
5987 /* add GPIO3 to group */
5988 val |= aeu_gpio_mask;
5989 REG_WR(bp, offset, val);
5990 }
5991 break;
5992
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00005993 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
Eliezer Tamirf1410642008-02-28 11:51:50 -08005994 /* add SPIO 5 to group 0 */
5995 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5996 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5997 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5998 break;
5999
6000 default:
6001 break;
6002 }
6003
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006004 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006005
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006006 return 0;
6007}
6008
/* Each of the two PCI functions owns half of the 768 ILT lines */
#define ILT_PER_FUNC	(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

/* extra ILT lines used beyond the per-function CDU context line
   (currently none; see bnx2x_init_func) */
#define CNIC_ILT_LINES		0
6022
6023static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6024{
6025 int reg;
6026
6027 if (CHIP_IS_E1H(bp))
6028 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6029 else /* E1 */
6030 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6031
6032 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6033}
6034
/* bnx2x_init_func - per-PCI-function hardware initialization.
 *
 * Enables the HC MSI reconfigure/attention capability, programs this
 * function's ILT line for the slow-path context, performs the E1H
 * per-function CM/NIG/HC setup, and clears latched PCIE error status.
 * Always returns 0.
 */
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	/* first ILT line owned by this function */
	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));


	if (CHIP_IS_E1H(bp)) {
		/* per-function CM blocks */
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
6085
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006086static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6087{
6088 int i, rc = 0;
6089
6090 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6091 BP_FUNC(bp), load_code);
6092
6093 bp->dmae_ready = 0;
6094 mutex_init(&bp->dmae_mutex);
6095 bnx2x_gunzip_init(bp);
6096
6097 switch (load_code) {
6098 case FW_MSG_CODE_DRV_LOAD_COMMON:
6099 rc = bnx2x_init_common(bp);
6100 if (rc)
6101 goto init_hw_err;
6102 /* no break */
6103
6104 case FW_MSG_CODE_DRV_LOAD_PORT:
6105 bp->dmae_ready = 1;
6106 rc = bnx2x_init_port(bp);
6107 if (rc)
6108 goto init_hw_err;
6109 /* no break */
6110
6111 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6112 bp->dmae_ready = 1;
6113 rc = bnx2x_init_func(bp);
6114 if (rc)
6115 goto init_hw_err;
6116 break;
6117
6118 default:
6119 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6120 break;
6121 }
6122
6123 if (!BP_NOMCP(bp)) {
6124 int func = BP_FUNC(bp);
6125
6126 bp->fw_drv_pulse_wr_seq =
6127 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6128 DRV_PULSE_SEQ_MASK);
6129 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6130 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6131 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6132 } else
6133 bp->func_stx = 0;
6134
6135 /* this needs to be done before gunzip end */
6136 bnx2x_zero_def_sb(bp);
6137 for_each_queue(bp, i)
6138 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6139
6140init_hw_err:
6141 bnx2x_gunzip_end(bp);
6142
6143 return rc;
6144}
6145
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006146/* send the MCP a request, block until there is a reply */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006147static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6148{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006149 int func = BP_FUNC(bp);
Eliezer Tamirf1410642008-02-28 11:51:50 -08006150 u32 seq = ++bp->fw_seq;
6151 u32 rc = 0;
Eilon Greenstein19680c42008-08-13 15:47:33 -07006152 u32 cnt = 1;
6153 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006154
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006155 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
Eliezer Tamirf1410642008-02-28 11:51:50 -08006156 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006157
Eilon Greenstein19680c42008-08-13 15:47:33 -07006158 do {
6159 /* let the FW do it's magic ... */
6160 msleep(delay);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006161
Eilon Greenstein19680c42008-08-13 15:47:33 -07006162 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006163
Eilon Greenstein19680c42008-08-13 15:47:33 -07006164 /* Give the FW up to 2 second (200*10ms) */
6165 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6166
6167 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6168 cnt*delay, rc, seq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006169
6170 /* is this a reply to our command? */
6171 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6172 rc &= FW_MSG_CODE_MASK;
Eliezer Tamirf1410642008-02-28 11:51:50 -08006173
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006174 } else {
6175 /* FW BUG! */
6176 BNX2X_ERR("FW failed to respond!\n");
6177 bnx2x_fw_dump(bp);
6178 rc = 0;
6179 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08006180
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006181 return rc;
6182}
6183
6184static void bnx2x_free_mem(struct bnx2x *bp)
6185{
6186
6187#define BNX2X_PCI_FREE(x, y, size) \
6188 do { \
6189 if (x) { \
6190 pci_free_consistent(bp->pdev, size, x, y); \
6191 x = NULL; \
6192 y = 0; \
6193 } \
6194 } while (0)
6195
6196#define BNX2X_FREE(x) \
6197 do { \
6198 if (x) { \
6199 vfree(x); \
6200 x = NULL; \
6201 } \
6202 } while (0)
6203
6204 int i;
6205
6206 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006207 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006208 for_each_queue(bp, i) {
6209
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006210 /* status blocks */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006211 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6212 bnx2x_fp(bp, i, status_blk_mapping),
6213 sizeof(struct host_status_block) +
6214 sizeof(struct eth_tx_db_data));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006215 }
6216 /* Rx */
6217 for_each_rx_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006218
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006219 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006220 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6221 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6222 bnx2x_fp(bp, i, rx_desc_mapping),
6223 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6224
6225 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6226 bnx2x_fp(bp, i, rx_comp_mapping),
6227 sizeof(struct eth_fast_path_rx_cqe) *
6228 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006229
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006230 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07006231 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006232 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6233 bnx2x_fp(bp, i, rx_sge_mapping),
6234 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6235 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006236 /* Tx */
6237 for_each_tx_queue(bp, i) {
6238
6239 /* fastpath tx rings: tx_buf tx_desc */
6240 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6241 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6242 bnx2x_fp(bp, i, tx_desc_mapping),
6243 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6244 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006245 /* end of fastpath */
6246
6247 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006248 sizeof(struct host_def_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006249
6250 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006251 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006252
6253#ifdef BCM_ISCSI
6254 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6255 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6256 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6257 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6258#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006259 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006260
6261#undef BNX2X_PCI_FREE
6262#undef BNX2X_KFREE
6263}
6264
/* bnx2x_alloc_mem - allocate all rings and DMA areas for the device.
 *
 * Mirror of bnx2x_free_mem().  The helper macros jump to
 * alloc_mem_err on failure, where everything already allocated is
 * released and -ENOMEM is returned; on success returns 0.
 */
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks (the eth_tx_db_data area is carved out
		   of the same DMA allocation - see the Tx loop below) */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* the HW Tx producers block sits right after the status
		   block inside the status-block DMA allocation */
		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		* (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6384
6385static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6386{
6387 int i;
6388
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006389 for_each_tx_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006390 struct bnx2x_fastpath *fp = &bp->fp[i];
6391
6392 u16 bd_cons = fp->tx_bd_cons;
6393 u16 sw_prod = fp->tx_pkt_prod;
6394 u16 sw_cons = fp->tx_pkt_cons;
6395
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006396 while (sw_cons != sw_prod) {
6397 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6398 sw_cons++;
6399 }
6400 }
6401}
6402
6403static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6404{
6405 int i, j;
6406
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006407 for_each_rx_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006408 struct bnx2x_fastpath *fp = &bp->fp[j];
6409
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006410 for (i = 0; i < NUM_RX_BD; i++) {
6411 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6412 struct sk_buff *skb = rx_buf->skb;
6413
6414 if (skb == NULL)
6415 continue;
6416
6417 pci_unmap_single(bp->pdev,
6418 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein356e2382009-02-12 08:38:32 +00006419 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006420
6421 rx_buf->skb = NULL;
6422 dev_kfree_skb(skb);
6423 }
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006424 if (!fp->disable_tpa)
Eilon Greenstein32626232008-08-13 15:51:07 -07006425 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6426 ETH_MAX_AGGREGATION_QUEUES_E1 :
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006427 ETH_MAX_AGGREGATION_QUEUES_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006428 }
6429}
6430
/* Free all skbs the driver still owns, Tx rings first then Rx rings. */
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
6436
6437static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6438{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006439 int i, offset = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006440
6441 free_irq(bp->msix_table[0].vector, bp->dev);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006442 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006443 bp->msix_table[0].vector);
6444
6445 for_each_queue(bp, i) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006446 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006447 "state %x\n", i, bp->msix_table[i + offset].vector,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006448 bnx2x_fp(bp, i, state));
6449
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006450 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006451 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006452}
6453
6454static void bnx2x_free_irq(struct bnx2x *bp)
6455{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006456 if (bp->flags & USING_MSIX_FLAG) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006457 bnx2x_free_msix_irqs(bp);
6458 pci_disable_msix(bp->pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006459 bp->flags &= ~USING_MSIX_FLAG;
6460
Eilon Greenstein8badd272009-02-12 08:36:15 +00006461 } else if (bp->flags & USING_MSI_FLAG) {
6462 free_irq(bp->pdev->irq, bp->dev);
6463 pci_disable_msi(bp->pdev);
6464 bp->flags &= ~USING_MSI_FLAG;
6465
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006466 } else
6467 free_irq(bp->pdev->irq, bp->dev);
6468}
6469
6470static int bnx2x_enable_msix(struct bnx2x *bp)
6471{
Eilon Greenstein8badd272009-02-12 08:36:15 +00006472 int i, rc, offset = 1;
6473 int igu_vec = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006474
Eilon Greenstein8badd272009-02-12 08:36:15 +00006475 bp->msix_table[0].entry = igu_vec;
6476 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006477
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006478 for_each_queue(bp, i) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00006479 igu_vec = BP_L_ID(bp) + offset + i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006480 bp->msix_table[i + offset].entry = igu_vec;
6481 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6482 "(fastpath #%u)\n", i + offset, igu_vec, i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006483 }
6484
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006485 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006486 BNX2X_NUM_QUEUES(bp) + offset);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006487 if (rc) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00006488 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6489 return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006490 }
Eilon Greenstein8badd272009-02-12 08:36:15 +00006491
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006492 bp->flags |= USING_MSIX_FLAG;
6493
6494 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006495}
6496
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006497static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6498{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006499 int i, rc, offset = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006500
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006501 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6502 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006503 if (rc) {
6504 BNX2X_ERR("request sp irq failed\n");
6505 return -EBUSY;
6506 }
6507
6508 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006509 struct bnx2x_fastpath *fp = &bp->fp[i];
6510
6511 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006512 rc = request_irq(bp->msix_table[i + offset].vector,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006513 bnx2x_msix_fp_int, 0, fp->name, fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006514 if (rc) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006515 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006516 bnx2x_free_msix_irqs(bp);
6517 return -EBUSY;
6518 }
6519
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006520 fp->state = BNX2X_FP_STATE_IRQ;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006521 }
6522
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006523 i = BNX2X_NUM_QUEUES(bp);
6524 if (is_multi(bp))
6525 printk(KERN_INFO PFX
6526 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6527 bp->dev->name, bp->msix_table[0].vector,
6528 bp->msix_table[offset].vector,
6529 bp->msix_table[offset + i - 1].vector);
6530 else
6531 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6532 bp->dev->name, bp->msix_table[0].vector,
6533 bp->msix_table[offset + i - 1].vector);
6534
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006535 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006536}
6537
Eilon Greenstein8badd272009-02-12 08:36:15 +00006538static int bnx2x_enable_msi(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006539{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006540 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006541
Eilon Greenstein8badd272009-02-12 08:36:15 +00006542 rc = pci_enable_msi(bp->pdev);
6543 if (rc) {
6544 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6545 return -1;
6546 }
6547 bp->flags |= USING_MSI_FLAG;
6548
6549 return 0;
6550}
6551
6552static int bnx2x_req_irq(struct bnx2x *bp)
6553{
6554 unsigned long flags;
6555 int rc;
6556
6557 if (bp->flags & USING_MSI_FLAG)
6558 flags = 0;
6559 else
6560 flags = IRQF_SHARED;
6561
6562 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006563 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006564 if (!rc)
6565 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6566
6567 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006568}
6569
Yitchak Gertner65abd742008-08-25 15:26:24 -07006570static void bnx2x_napi_enable(struct bnx2x *bp)
6571{
6572 int i;
6573
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006574 for_each_rx_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07006575 napi_enable(&bnx2x_fp(bp, i, napi));
6576}
6577
6578static void bnx2x_napi_disable(struct bnx2x *bp)
6579{
6580 int i;
6581
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006582 for_each_rx_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07006583 napi_disable(&bnx2x_fp(bp, i, napi));
6584}
6585
6586static void bnx2x_netif_start(struct bnx2x *bp)
6587{
6588 if (atomic_dec_and_test(&bp->intr_sem)) {
6589 if (netif_running(bp->dev)) {
Yitchak Gertner65abd742008-08-25 15:26:24 -07006590 bnx2x_napi_enable(bp);
6591 bnx2x_int_enable(bp);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006592 if (bp->state == BNX2X_STATE_OPEN)
6593 netif_tx_wake_all_queues(bp->dev);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006594 }
6595 }
6596}
6597
/* Quiesce the datapath: synchronously disable device interrupts
 * (optionally at the HW level when disable_hw is set), stop NAPI
 * polling, and freeze the Tx queues if the netdev is up.
 *
 * Ordering matters: interrupts are masked before NAPI is disabled so
 * no new poll can be scheduled while we wait for the current one.
 */
static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (netif_running(bp->dev)) {
		netif_tx_disable(bp->dev);
		/* refresh trans_start so the stack does not mistake
		 * this deliberate stall for a Tx timeout
		 */
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
6607
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006608/*
6609 * Init service functions
6610 */
6611
/* Program (set != 0) or invalidate (set == 0) the E1 CAM entries for
 * this port.  Entry 0 carries the primary unicast MAC taken from
 * bp->dev->dev_addr, entry 1 the broadcast address.  The table is
 * submitted to firmware with a SET_MAC ramrod; completion arrives
 * asynchronously through the slowpath event path.
 */
static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC: dev_addr is read as three big-endian u16 words
	 * and byte-swapped into the CAM layout
	 */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast (ff:ff:ff:ff:ff:ff) */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	/* hand the table to firmware by physical address */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
6664
/* Program (set != 0) or invalidate (set == 0) the single E1H CAM
 * entry for this function's primary MAC.  A set request is refused
 * unless the device is fully OPEN; clearing is always allowed.
 * Submitted via a SET_MAC ramrod, like the E1 variant above.
 */
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC, byte-swapped into CAM word layout */
	config->config_table[0].msb_mac_addr =
		swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
		swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
		swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		/* invalidate the entry rather than program it */
		config->config_table[0].flags =
			MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
6710
/* Wait (up to ~5 seconds) for a slowpath ramrod to complete, i.e. for
 * *state_p to become 'state'.
 *
 * @state:   target state value to wait for
 * @idx:     fastpath index the reply may arrive on (for log messages
 *           and, in poll mode, for servicing the non-default queue)
 * @state_p: pointer to the state variable updated by bnx2x_sp_event()
 * @poll:    when non-zero, actively drain Rx completions instead of
 *           relying on the interrupt path to deliver the event
 *
 * Returns 0 when the state is reached, -EBUSY on timeout (and panics
 * under BNX2X_STOP_ON_ERROR builds).  May sleep.
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
6752
6753static int bnx2x_setup_leading(struct bnx2x *bp)
6754{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006755 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006756
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006757 /* reset IGU state */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006758 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006759
6760 /* SETUP ramrod */
6761 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6762
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006763 /* Wait for completion */
6764 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006765
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006766 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006767}
6768
6769static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6770{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006771 struct bnx2x_fastpath *fp = &bp->fp[index];
6772
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006773 /* reset IGU state */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006774 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006775
Eliezer Tamir228241e2008-02-28 11:56:57 -08006776 /* SETUP ramrod */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006777 fp->state = BNX2X_FP_STATE_OPENING;
6778 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6779 fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006780
6781 /* Wait for completion */
6782 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006783 &(fp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006784}
6785
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006786static int bnx2x_poll(struct napi_struct *napi, int budget);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006787
/* Decide the interrupt mode and queue count before load.
 *
 * Driven by the 'int_mode' module parameter: INTx/MSI force a single
 * queue pair; MSI-X (the default) sizes the queue count from the
 * online CPUs (capped at BNX2X_MAX_QUEUES) when RSS is enabled, then
 * tries to enable MSI-X with that many vectors.  If MSI-X cannot be
 * enabled, the driver falls back to one queue pair (the actual MSI /
 * INTx fallback happens later, in bnx2x_nic_load()).
 */
static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   " number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	/* publish the Tx queue count to the stack */
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}
6832
6833static void bnx2x_set_rx_mode(struct net_device *dev);
6834
/* Bring the NIC up.  Must be called with rtnl_lock held.
 *
 * Sequence: pick interrupt mode and queue counts, allocate memory,
 * register NAPI contexts, request IRQs (MSI-X, then MSI, then INTx),
 * negotiate the load type with the management CPU (MCP) or emulate
 * the negotiation via load_count[] when there is no MCP, init the HW
 * and firmware state, open the leading and non-default connections,
 * program the MAC, and finally start the Tx path per load_mode
 * (LOAD_NORMAL / LOAD_OPEN / LOAD_DIAG).
 *
 * On failure the goto ladder (load_error3/2/1) unwinds in reverse
 * order of acquisition.  Returns 0 on success or a negative errno.
 */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	/* TPA is disabled per-queue when the global flag is clear */
	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	/* reset the per-queue poll statistics used for debugging */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* NOTE(review): rc is still 0 here, so the
		 * (rc != -ENOMEM) test is always true in this path --
		 * looks like a leftover guard; confirm intent upstream
		 */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		/* no MCP: emulate the load negotiation with shared
		 * per-driver counters (total, port0, port1)
		 */
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	/* first function on the port becomes the port management
	 * function (PMF)
	 */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	/* a function disabled in the multi-function config stays down */
	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	/* only the PMF touches the PHY */
	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);


	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
7044
7045static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7046{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007047 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007048 int rc;
7049
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007050 /* halt the connection */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007051 fp->state = BNX2X_FP_STATE_HALTING;
7052 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007053
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007054 /* Wait for completion */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007055 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007056 &(fp->state), 1);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007057 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007058 return rc;
7059
7060 /* delete cfc entry */
7061 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7062
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007063 /* Wait for completion */
7064 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007065 &(fp->state), 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007066 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007067}
7068
/* Tear down the leading (default) connection: HALT it, then post a
 * PORT_DELETE ramrod and poll the default status block's slowpath
 * producer index for the completion.  A timeout on the second phase
 * is tolerated (the chip is reset afterwards anyway) but reported
 * with -EBUSY.  May sleep.
 */
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	/* snapshot the producer so we can detect its advance below */
	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
7118
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007119static void bnx2x_reset_func(struct bnx2x *bp)
7120{
7121 int port = BP_PORT(bp);
7122 int func = BP_FUNC(bp);
7123 int base, i;
Eliezer Tamir49d66772008-02-28 11:53:13 -08007124
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007125 /* Configure IGU */
7126 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7127 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7128
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007129 /* Clear ILT */
7130 base = FUNC_ILT_BASE(func);
7131 for (i = base; i < base + ILT_PER_FUNC; i++)
7132 bnx2x_ilt_wr(bp, i, 0);
7133}
7134
/* Reset the per-port hardware state: mask NIG interrupts, stop
 * traffic from reaching the BRB, mask port attentions, then give the
 * buffers 100ms to drain and warn if the BRB is still occupied.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* let in-flight packets drain before checking occupancy */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
7160
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007161static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7162{
7163 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7164 BP_FUNC(bp), reset_code);
7165
7166 switch (reset_code) {
7167 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7168 bnx2x_reset_port(bp);
7169 bnx2x_reset_func(bp);
7170 bnx2x_reset_common(bp);
7171 break;
7172
7173 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7174 bnx2x_reset_port(bp);
7175 bnx2x_reset_func(bp);
7176 break;
7177
7178 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7179 bnx2x_reset_func(bp);
7180 break;
7181
7182 default:
7183 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7184 break;
7185 }
7186}
7187
Eilon Greenstein33471622008-08-13 15:59:08 -07007188/* must be called with rtnl_lock */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007189static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007190{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007191 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007192 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007193 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007194
7195 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7196
Eliezer Tamir228241e2008-02-28 11:56:57 -08007197 bp->rx_mode = BNX2X_RX_MODE_NONE;
7198 bnx2x_set_storm_rx_mode(bp);
7199
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007200 bnx2x_netif_stop(bp, 1);
Eilon Greensteine94d8af2009-01-22 03:37:36 +00007201
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007202 del_timer_sync(&bp->timer);
7203 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7204 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07007205 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007206
Eilon Greenstein70b99862009-01-14 06:43:48 +00007207 /* Release IRQs */
7208 bnx2x_free_irq(bp);
7209
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007210 /* Wait until tx fastpath tasks complete */
7211 for_each_tx_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007212 struct bnx2x_fastpath *fp = &bp->fp[i];
7213
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007214 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08007215 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007216
Eilon Greenstein7961f792009-03-02 07:59:31 +00007217 bnx2x_tx_int(fp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007218 if (!cnt) {
7219 BNX2X_ERR("timeout waiting for queue[%d]\n",
7220 i);
7221#ifdef BNX2X_STOP_ON_ERROR
7222 bnx2x_panic();
7223 return -EBUSY;
7224#else
7225 break;
7226#endif
7227 }
7228 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007229 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007230 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08007231 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007232 /* Give HW time to discard old tx messages */
7233 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007234
Yitchak Gertner65abd742008-08-25 15:26:24 -07007235 if (CHIP_IS_E1(bp)) {
7236 struct mac_configuration_cmd *config =
7237 bnx2x_sp(bp, mcast_config);
7238
7239 bnx2x_set_mac_addr_e1(bp, 0);
7240
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08007241 for (i = 0; i < config->hdr.length; i++)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007242 CAM_INVALIDATE(config->config_table[i]);
7243
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08007244 config->hdr.length = i;
Yitchak Gertner65abd742008-08-25 15:26:24 -07007245 if (CHIP_REV_IS_SLOW(bp))
7246 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7247 else
7248 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
Eilon Greenstein0626b892009-02-12 08:38:14 +00007249 config->hdr.client_id = bp->fp->cl_id;
Yitchak Gertner65abd742008-08-25 15:26:24 -07007250 config->hdr.reserved1 = 0;
7251
7252 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7253 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7254 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7255
7256 } else { /* E1H */
7257 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7258
7259 bnx2x_set_mac_addr_e1h(bp, 0);
7260
7261 for (i = 0; i < MC_HASH_SIZE; i++)
7262 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7263 }
7264
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007265 if (unload_mode == UNLOAD_NORMAL)
7266 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007267
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007268 else if (bp->flags & NO_WOL_FLAG) {
7269 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7270 if (CHIP_IS_E1H(bp))
7271 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7272
7273 } else if (bp->wol) {
7274 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007275 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007276 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007277 /* The mac address is written to entries 1-4 to
7278 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007279 u8 entry = (BP_E1HVN(bp) + 1)*8;
7280
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007281 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007282 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007283
7284 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7285 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007286 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007287
7288 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007289
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007290 } else
7291 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7292
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007293 /* Close multi and leading connections
7294 Completions for ramrods are collected in a synchronous way */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007295 for_each_nondefault_queue(bp, i)
7296 if (bnx2x_stop_multi(bp, i))
Eliezer Tamir228241e2008-02-28 11:56:57 -08007297 goto unload_error;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007298
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007299 rc = bnx2x_stop_leading(bp);
7300 if (rc) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007301 BNX2X_ERR("Stop leading failed!\n");
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007302#ifdef BNX2X_STOP_ON_ERROR
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007303 return -EBUSY;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007304#else
7305 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007306#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08007307 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007308
Eliezer Tamir228241e2008-02-28 11:56:57 -08007309unload_error:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007310 if (!BP_NOMCP(bp))
Eliezer Tamir228241e2008-02-28 11:56:57 -08007311 reset_code = bnx2x_fw_command(bp, reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007312 else {
Eilon Greensteinf5372252009-02-12 08:38:30 +00007313 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007314 load_count[0], load_count[1], load_count[2]);
7315 load_count[0]--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007316 load_count[1 + port]--;
Eilon Greensteinf5372252009-02-12 08:38:30 +00007317 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007318 load_count[0], load_count[1], load_count[2]);
7319 if (load_count[0] == 0)
7320 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007321 else if (load_count[1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007322 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7323 else
7324 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7325 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007326
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007327 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7328 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7329 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007330
7331 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08007332 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007333
7334 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007335 if (!BP_NOMCP(bp))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007336 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
Eilon Greenstein356e2382009-02-12 08:38:32 +00007337
Eilon Greenstein9a035442008-11-03 16:45:55 -08007338 bp->port.pmf = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007339
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07007340 /* Free SKBs, SGEs, TPA pool and driver internals */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007341 bnx2x_free_skbs(bp);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007342 for_each_rx_queue(bp, i)
Eilon Greenstein3196a882008-08-13 15:58:49 -07007343 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007344 for_each_rx_queue(bp, i)
Eilon Greenstein7cde1c82009-01-22 06:01:25 +00007345 netif_napi_del(&bnx2x_fp(bp, i, napi));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007346 bnx2x_free_mem(bp);
7347
7348 bp->state = BNX2X_STATE_CLOSED;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007349
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007350 netif_carrier_off(bp->dev);
7351
7352 return 0;
7353}
7354
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007355static void bnx2x_reset_task(struct work_struct *work)
7356{
7357 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7358
7359#ifdef BNX2X_STOP_ON_ERROR
7360 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7361 " so reset not done to allow debug dump,\n"
7362 KERN_ERR " you will need to reboot when done\n");
7363 return;
7364#endif
7365
7366 rtnl_lock();
7367
7368 if (!netif_running(bp->dev))
7369 goto reset_task_exit;
7370
7371 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7372 bnx2x_nic_load(bp, LOAD_NORMAL);
7373
7374reset_task_exit:
7375 rtnl_unlock();
7376}
7377
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007378/* end of nic load/unload */
7379
7380/* ethtool_ops */
7381
7382/*
7383 * Init service functions
7384 */
7385
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007386static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7387{
7388 switch (func) {
7389 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7390 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7391 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7392 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7393 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7394 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7395 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7396 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7397 default:
7398 BNX2X_ERR("Unsupported function index: %d\n", func);
7399 return (u32)(-1);
7400 }
7401}
7402
/* Disable chip interrupts on an E1H device while "pretending" to be
 * function 0, which puts the chip into a like-E1 mode for the UNDI
 * takeover path.  The sequence is strictly ordered: flush, pretend,
 * verify, disable, flush, restore, verify.  BUG()s if the pretend
 * register read-back does not match what was written.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original funtion settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
7435
/* Disable chip interrupts, using the E1H pretend-based path when the
 * chip is an E1H and the plain disable otherwise.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp))
		bnx2x_int_disable(bp);
	else
		bnx2x_undi_int_disable_e1h(bp, func);
}
7443
/* Detect a leftover pre-boot (UNDI) driver instance and cleanly unload
 * it before this driver takes ownership of the device.  Detection:
 * MISC_REG_UNPREPARED == 1 and the UNDI doorbell CID offset (0x7) in
 * DORQ_REG_NORM_CID_OFST.  The whole takeover runs under the UNDI HW
 * lock; note that bp->func/bp->fw_seq are temporarily repointed at
 * ports 0/1 to talk to the MCP and restored at the end.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			/* interrupts must be off before touching NIG/BRB */
			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			       (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					      MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
7542
/* Probe the port-independent hardware info at device init time: chip
 * id/revision, flash size, shared-memory (MCP) base, BC firmware
 * version, WoL capability and part number.  Sets NO_MCP_FLAG and
 * returns early when the shared-memory base looks invalid (MCP not
 * running); sets ONE_PORT_FLAG / NO_WOL_FLAG as detected.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* single-port detection: odd chip_id, or per-chip-family bond bits
	 * at GRC offset 0x2874 (masked with 0x55) */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* shmem base outside [0xA0000, 0xC0000) means no management CPU */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	/* bootcode version lives in the upper bytes of dev_info.bc_rev */
	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		/* WoL capability comes from the PCI PM capability (PME from
		 * D3cold) */
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	/* part number is a 16-byte field read as four 32-bit words */
	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
7636
/* Build bp->port.supported (ethtool SUPPORTED_* mask) from the NVRAM
 * switch configuration (1G SerDes vs 10G XGXS) and the external PHY
 * type, read the PHY MDIO address from the NIG, then trim the mask
 * down to what the NVRAM speed_cap_mask allows.  On a bad NVRAM
 * configuration it logs an error and returns with the mask unchanged.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* SerDes PHY MDIO address, one register per port */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* XGXS PHY MDIO address, one register per port */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
7861
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007862static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007863{
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007864 bp->link_params.req_duplex = DUPLEX_FULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007865
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007866 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007867 case PORT_FEATURE_LINK_SPEED_AUTO:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007868 if (bp->port.supported & SUPPORTED_Autoneg) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007869 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007870 bp->port.advertising = bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007871 } else {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007872 u32 ext_phy_type =
7873 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7874
7875 if ((ext_phy_type ==
7876 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7877 (ext_phy_type ==
7878 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007879 /* force 10G, no AN */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007880 bp->link_params.req_line_speed = SPEED_10000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007881 bp->port.advertising =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007882 (ADVERTISED_10000baseT_Full |
7883 ADVERTISED_FIBRE);
7884 break;
7885 }
7886 BNX2X_ERR("NVRAM config error. "
7887 "Invalid link_config 0x%x"
7888 " Autoneg not supported\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007889 bp->port.link_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007890 return;
7891 }
7892 break;
7893
7894 case PORT_FEATURE_LINK_SPEED_10M_FULL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007895 if (bp->port.supported & SUPPORTED_10baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007896 bp->link_params.req_line_speed = SPEED_10;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007897 bp->port.advertising = (ADVERTISED_10baseT_Full |
7898 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007899 } else {
7900 BNX2X_ERR("NVRAM config error. "
7901 "Invalid link_config 0x%x"
7902 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007903 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007904 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007905 return;
7906 }
7907 break;
7908
7909 case PORT_FEATURE_LINK_SPEED_10M_HALF:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007910 if (bp->port.supported & SUPPORTED_10baseT_Half) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007911 bp->link_params.req_line_speed = SPEED_10;
7912 bp->link_params.req_duplex = DUPLEX_HALF;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007913 bp->port.advertising = (ADVERTISED_10baseT_Half |
7914 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007915 } else {
7916 BNX2X_ERR("NVRAM config error. "
7917 "Invalid link_config 0x%x"
7918 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007919 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007920 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007921 return;
7922 }
7923 break;
7924
7925 case PORT_FEATURE_LINK_SPEED_100M_FULL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007926 if (bp->port.supported & SUPPORTED_100baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007927 bp->link_params.req_line_speed = SPEED_100;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007928 bp->port.advertising = (ADVERTISED_100baseT_Full |
7929 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007930 } else {
7931 BNX2X_ERR("NVRAM config error. "
7932 "Invalid link_config 0x%x"
7933 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007934 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007935 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007936 return;
7937 }
7938 break;
7939
7940 case PORT_FEATURE_LINK_SPEED_100M_HALF:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007941 if (bp->port.supported & SUPPORTED_100baseT_Half) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007942 bp->link_params.req_line_speed = SPEED_100;
7943 bp->link_params.req_duplex = DUPLEX_HALF;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007944 bp->port.advertising = (ADVERTISED_100baseT_Half |
7945 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007946 } else {
7947 BNX2X_ERR("NVRAM config error. "
7948 "Invalid link_config 0x%x"
7949 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007950 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007951 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007952 return;
7953 }
7954 break;
7955
7956 case PORT_FEATURE_LINK_SPEED_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007957 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007958 bp->link_params.req_line_speed = SPEED_1000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007959 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7960 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007961 } else {
7962 BNX2X_ERR("NVRAM config error. "
7963 "Invalid link_config 0x%x"
7964 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007965 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007966 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007967 return;
7968 }
7969 break;
7970
7971 case PORT_FEATURE_LINK_SPEED_2_5G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007972 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007973 bp->link_params.req_line_speed = SPEED_2500;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007974 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7975 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007976 } else {
7977 BNX2X_ERR("NVRAM config error. "
7978 "Invalid link_config 0x%x"
7979 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007980 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007981 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007982 return;
7983 }
7984 break;
7985
7986 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7987 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7988 case PORT_FEATURE_LINK_SPEED_10G_KR:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007989 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007990 bp->link_params.req_line_speed = SPEED_10000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007991 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7992 ADVERTISED_FIBRE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007993 } else {
7994 BNX2X_ERR("NVRAM config error. "
7995 "Invalid link_config 0x%x"
7996 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007997 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007998 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007999 return;
8000 }
8001 break;
8002
8003 default:
8004 BNX2X_ERR("NVRAM config error. "
8005 "BAD link speed link_config 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008006 bp->port.link_config);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008007 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008008 bp->port.advertising = bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008009 break;
8010 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008011
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008012 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8013 PORT_FEATURE_FLOW_CONTROL_MASK);
David S. Millerc0700f92008-12-16 23:53:20 -08008014 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
Randy Dunlap4ab84d42008-08-07 20:33:19 -07008015 !(bp->port.supported & SUPPORTED_Autoneg))
David S. Millerc0700f92008-12-16 23:53:20 -08008016 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008017
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008018 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
Eliezer Tamirf1410642008-02-28 11:51:50 -08008019 " advertising 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008020 bp->link_params.req_line_speed,
8021 bp->link_params.req_duplex,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008022 bp->link_params.req_flow_ctrl, bp->port.advertising);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008023}
8024
/* Read the per-port hardware configuration out of the shared-memory
 * (SHMEM) region that the MCP firmware populates: SerDes lane layout,
 * external PHY type, speed capabilities, per-lane XGXS equalization
 * values, feature flags, the WoL default and the port MAC address.
 * Results are cached in bp->link_params / bp->port for later use.
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	/* back-pointer and port index used by the link (PHY) layer */
	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx
	 * (each SHMEM dword packs two 16-bit per-lane values:
	 *  high half -> even lane, low half -> odd lane) */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			  dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			  dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* optical module enforcement is an NVRAM feature bit */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	/* derive switch type, then validate/apply the requested settings */
	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/* MAC address: upper 2 bytes in mac_upper, lower 4 in mac_lower */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008097
/* Gather per-function hardware information: common chip info, E1H
 * multi-function (outer-VLAN) configuration, the port hw info when an
 * MCP is present, and the function MAC address.  Returns 0 on success
 * or -EPERM when an E1H VN has no valid outer-VLAN tag configured.
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	/* default: single-function, no outer VLAN */
	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		/* a non-default E1HOV tag means multi-function mode */
		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("single function mode\n");
			/* only VN 0 may legitimately lack an E1HOV tag */
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		/* resume the driver<->MCP mailbox sequence number */
		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	/* in MF mode the per-function MAC (if valid) overrides the port MAC */
	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
8168
/* One-time software initialization of the bnx2x device structure:
 * locks and work items, hardware info discovery, module-parameter
 * driven settings (multi-queue, TPA/LRO, MRRS), ring sizes, coalescing
 * defaults and the periodic timer.  Returns the bnx2x_get_hwinfo() rc.
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode (RSS requires MSI-X interrupts) */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags (HW transparent packet aggregation <-> LRO) */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	/* PCIe maximum read request size, from module parameter */
	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* default interrupt coalescing (usec) */
	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	/* slow (emulation/FPGA) chips get a stretched timer period */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
8235
8236/*
8237 * ethtool service functions
8238 */
8239
8240/* All ethtool functions called with rtnl_lock */
8241
/* ethtool get_settings handler: report supported/advertised modes and
 * the current (when carrier is up) or requested (when down) speed and
 * duplex, the port type derived from the external PHY, and the autoneg
 * state.  Always returns 0.
 */
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	/* with link up report actual values, otherwise the request */
	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	/* in MF mode the function may be rate-limited below line speed */
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	/* map the external PHY type to an ethtool port type */
	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	/* packet coalescing limits are not exposed */
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
8318
/* ethtool set_settings handler: validate and apply a new link request,
 * either autonegotiation or a forced speed/duplex.  Each forced mode is
 * checked against bp->port.supported and rejected with -EINVAL when the
 * port cannot do it.  If the interface is running, the link is restarted
 * with the new parameters.
 */
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	/* in multi-function mode link settings are not per-function */
	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		/* 1G and above support full duplex only */
		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	/* apply immediately if the interface is up */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
8469
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008470#define PHY_FW_VER_LEN 10
8471
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008472static void bnx2x_get_drvinfo(struct net_device *dev,
8473 struct ethtool_drvinfo *info)
8474{
8475 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinf0e53a82008-08-13 15:58:30 -07008476 u8 phy_fw_ver[PHY_FW_VER_LEN];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008477
8478 strcpy(info->driver, DRV_MODULE_NAME);
8479 strcpy(info->version, DRV_MODULE_VERSION);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008480
8481 phy_fw_ver[0] = '\0';
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008482 if (bp->port.pmf) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07008483 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008484 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8485 (bp->state != BNX2X_STATE_CLOSED),
8486 phy_fw_ver, PHY_FW_VER_LEN);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07008487 bnx2x_release_phy_lock(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008488 }
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008489
Eilon Greensteinf0e53a82008-08-13 15:58:30 -07008490 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8491 (bp->common.bc_ver & 0xff0000) >> 16,
8492 (bp->common.bc_ver & 0xff00) >> 8,
8493 (bp->common.bc_ver & 0xff),
8494 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008495 strcpy(info->bus_info, pci_name(bp->pdev));
8496 info->n_stats = BNX2X_NUM_STATS;
8497 info->testinfo_len = BNX2X_NUM_TESTS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008498 info->eedump_len = bp->common.flash_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008499 info->regdump_len = 0;
8500}
8501
8502static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8503{
8504 struct bnx2x *bp = netdev_priv(dev);
8505
8506 if (bp->flags & NO_WOL_FLAG) {
8507 wol->supported = 0;
8508 wol->wolopts = 0;
8509 } else {
8510 wol->supported = WAKE_MAGIC;
8511 if (bp->wol)
8512 wol->wolopts = WAKE_MAGIC;
8513 else
8514 wol->wolopts = 0;
8515 }
8516 memset(&wol->sopass, 0, sizeof(wol->sopass));
8517}
8518
8519static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8520{
8521 struct bnx2x *bp = netdev_priv(dev);
8522
8523 if (wol->wolopts & ~WAKE_MAGIC)
8524 return -EINVAL;
8525
8526 if (wol->wolopts & WAKE_MAGIC) {
8527 if (bp->flags & NO_WOL_FLAG)
8528 return -EINVAL;
8529
8530 bp->wol = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008531 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008532 bp->wol = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008533
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008534 return 0;
8535}
8536
8537static u32 bnx2x_get_msglevel(struct net_device *dev)
8538{
8539 struct bnx2x *bp = netdev_priv(dev);
8540
8541 return bp->msglevel;
8542}
8543
8544static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8545{
8546 struct bnx2x *bp = netdev_priv(dev);
8547
8548 if (capable(CAP_NET_ADMIN))
8549 bp->msglevel = level;
8550}
8551
8552static int bnx2x_nway_reset(struct net_device *dev)
8553{
8554 struct bnx2x *bp = netdev_priv(dev);
8555
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008556 if (!bp->port.pmf)
8557 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008558
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008559 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008560 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008561 bnx2x_link_set(bp);
8562 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008563
8564 return 0;
8565}
8566
8567static int bnx2x_get_eeprom_len(struct net_device *dev)
8568{
8569 struct bnx2x *bp = netdev_priv(dev);
8570
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008571 return bp->common.flash_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008572}
8573
8574static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8575{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008576 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008577 int count, i;
8578 u32 val = 0;
8579
8580 /* adjust timeout for emulation/FPGA */
8581 count = NVRAM_TIMEOUT_COUNT;
8582 if (CHIP_REV_IS_SLOW(bp))
8583 count *= 100;
8584
8585 /* request access to nvram interface */
8586 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8587 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8588
8589 for (i = 0; i < count*10; i++) {
8590 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8591 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8592 break;
8593
8594 udelay(5);
8595 }
8596
8597 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008598 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008599 return -EBUSY;
8600 }
8601
8602 return 0;
8603}
8604
8605static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8606{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008607 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008608 int count, i;
8609 u32 val = 0;
8610
8611 /* adjust timeout for emulation/FPGA */
8612 count = NVRAM_TIMEOUT_COUNT;
8613 if (CHIP_REV_IS_SLOW(bp))
8614 count *= 100;
8615
8616 /* relinquish nvram interface */
8617 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8618 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8619
8620 for (i = 0; i < count*10; i++) {
8621 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8622 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8623 break;
8624
8625 udelay(5);
8626 }
8627
8628 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008629 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008630 return -EBUSY;
8631 }
8632
8633 return 0;
8634}
8635
8636static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8637{
8638 u32 val;
8639
8640 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8641
8642 /* enable both bits, even on read */
8643 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8644 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8645 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8646}
8647
8648static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8649{
8650 u32 val;
8651
8652 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8653
8654 /* disable both bits, even after read */
8655 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8656 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8657 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8658}
8659
/* Issue a single dword NVRAM read at 'offset' and poll for completion.
 * cmd_flags may carry MCPR_NVM_COMMAND_FIRST/LAST for sequenced reads.
 * The result is stored big-endian in *ret_val (byte order as ethtool
 * expects).  Returns 0 on success, -EBUSY on timeout.
 */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
8704
/* Read @buf_size bytes from NVRAM at @offset into @ret_buf.
 *
 * @offset and @buf_size must be dword-aligned and non-zero, and the
 * range must fit inside the flash.  Acquires the NVRAM lock, enables
 * access, reads dword-by-dword (tagging the first and last commands
 * with the FIRST/LAST sequencing flags), then releases everything.
 *
 * Returns 0 on success or a negative errno (-EINVAL on bad
 * parameters, or the failure code of the lock/read helpers).
 */
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s); the loop leaves exactly one dword for
	 * the LAST command below (sizes are validated as multiples of 4) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	/* read the final dword with the LAST flag set */
	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
8759
8760static int bnx2x_get_eeprom(struct net_device *dev,
8761 struct ethtool_eeprom *eeprom, u8 *eebuf)
8762{
8763 struct bnx2x *bp = netdev_priv(dev);
8764 int rc;
8765
Eilon Greenstein2add3ac2009-01-14 06:44:07 +00008766 if (!netif_running(dev))
8767 return -EAGAIN;
8768
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008769 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008770 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8771 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8772 eeprom->len, eeprom->len);
8773
8774 /* parameters already validated in ethtool_get_eeprom */
8775
8776 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8777
8778 return rc;
8779}
8780
/* Write one 32-bit word @val to NVRAM at @offset.
 *
 * @cmd_flags carries the MCPR_NVM_COMMAND_{FIRST,LAST} sequencing
 * flags.  Caller must hold the NVRAM lock and have access enabled.
 * Returns 0 on success, -EBUSY if the DONE bit is not observed
 * within the polling budget.
 */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion: poll the DONE bit every 5us
	 * (val is reused here as a scratch register readback) */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
8820
Eliezer Tamirf1410642008-02-28 11:51:50 -08008821#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008822
/* Write a single byte (*data_buf) to NVRAM at @offset using
 * read-modify-write of the containing aligned dword.
 *
 * Used by bnx2x_nvram_write() for the 1-byte ethtool case.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): val is declared __be32 but is masked/shifted with
 * BYTE_OFFSET() and then converted with be32_to_cpu() before the
 * write-back; the byte-lane arithmetic mixes endian views -- this is
 * the long-standing upstream code path, preserved as-is.
 */
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the whole dword that contains the target byte */
	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		/* splice the new byte into its lane */
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
8868
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * A 1-byte write (the ethtool single-byte case) is delegated to
 * bnx2x_nvram_write1().  Otherwise @offset and @buf_size must be
 * dword-aligned, non-zero and within the flash.  Data is written
 * dword-by-dword; FIRST/LAST command flags are re-asserted at
 * NVRAM page boundaries so each page is a complete command sequence.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* LAST on the final dword of the buffer or of a page;
		 * FIRST again at the start of each new page */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
8929
/* ethtool set_eeprom callback.
 *
 * Two modes, selected by eeprom->magic:
 *  - magic == 0x00504859 ("PHY"): treat @eebuf as PHY firmware and
 *    flash it through bnx2x_flash_download(); only the PMF function
 *    may do this.  If the device is OPEN/DISABLED the link is reset
 *    and re-initialized afterwards so the new firmware takes effect.
 *  - any other magic: plain NVRAM write via bnx2x_nvram_write().
 *
 * Returns 0 on success or a negative errno; in the PHY path the
 * helper return codes are OR-combined into rc.
 */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				/* bounce the link so the new PHY FW is used */
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars, 1);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
8971
8972static int bnx2x_get_coalesce(struct net_device *dev,
8973 struct ethtool_coalesce *coal)
8974{
8975 struct bnx2x *bp = netdev_priv(dev);
8976
8977 memset(coal, 0, sizeof(struct ethtool_coalesce));
8978
8979 coal->rx_coalesce_usecs = bp->rx_ticks;
8980 coal->tx_coalesce_usecs = bp->tx_ticks;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008981
8982 return 0;
8983}
8984
8985static int bnx2x_set_coalesce(struct net_device *dev,
8986 struct ethtool_coalesce *coal)
8987{
8988 struct bnx2x *bp = netdev_priv(dev);
8989
8990 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8991 if (bp->rx_ticks > 3000)
8992 bp->rx_ticks = 3000;
8993
8994 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8995 if (bp->tx_ticks > 0x3000)
8996 bp->tx_ticks = 0x3000;
8997
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008998 if (netif_running(dev))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008999 bnx2x_update_coalesce(bp);
9000
9001 return 0;
9002}
9003
9004static void bnx2x_get_ringparam(struct net_device *dev,
9005 struct ethtool_ringparam *ering)
9006{
9007 struct bnx2x *bp = netdev_priv(dev);
9008
9009 ering->rx_max_pending = MAX_RX_AVAIL;
9010 ering->rx_mini_max_pending = 0;
9011 ering->rx_jumbo_max_pending = 0;
9012
9013 ering->rx_pending = bp->rx_ring_size;
9014 ering->rx_mini_pending = 0;
9015 ering->rx_jumbo_pending = 0;
9016
9017 ering->tx_max_pending = MAX_TX_AVAIL;
9018 ering->tx_pending = bp->tx_ring_size;
9019}
9020
9021static int bnx2x_set_ringparam(struct net_device *dev,
9022 struct ethtool_ringparam *ering)
9023{
9024 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009025 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009026
9027 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9028 (ering->tx_pending > MAX_TX_AVAIL) ||
9029 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9030 return -EINVAL;
9031
9032 bp->rx_ring_size = ering->rx_pending;
9033 bp->tx_ring_size = ering->tx_pending;
9034
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009035 if (netif_running(dev)) {
9036 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9037 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009038 }
9039
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009040 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009041}
9042
9043static void bnx2x_get_pauseparam(struct net_device *dev,
9044 struct ethtool_pauseparam *epause)
9045{
9046 struct bnx2x *bp = netdev_priv(dev);
9047
Eilon Greenstein356e2382009-02-12 08:38:32 +00009048 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9049 BNX2X_FLOW_CTRL_AUTO) &&
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009050 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9051
David S. Millerc0700f92008-12-16 23:53:20 -08009052 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9053 BNX2X_FLOW_CTRL_RX);
9054 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9055 BNX2X_FLOW_CTRL_TX);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009056
9057 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9058 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9059 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9060}
9061
/* ethtool set_pauseparam callback.
 *
 * Translates the requested rx/tx pause and autoneg settings into
 * link_params.req_flow_ctrl, then re-runs link setup if the NIC is
 * up.  On an E1H multi-function device pause is not configurable per
 * function, so the request is silently accepted.
 *
 * Returns 0, or -EINVAL if autoneg is requested but unsupported.
 */
static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	/* start from AUTO (all direction bits set), then narrow down */
	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	/* neither direction requested: explicit NONE */
	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		/* with speed autoneg too, let the PHY negotiate pause */
		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
9105
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009106static int bnx2x_set_flags(struct net_device *dev, u32 data)
9107{
9108 struct bnx2x *bp = netdev_priv(dev);
9109 int changed = 0;
9110 int rc = 0;
9111
9112 /* TPA requires Rx CSUM offloading */
9113 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9114 if (!(dev->features & NETIF_F_LRO)) {
9115 dev->features |= NETIF_F_LRO;
9116 bp->flags |= TPA_ENABLE_FLAG;
9117 changed = 1;
9118 }
9119
9120 } else if (dev->features & NETIF_F_LRO) {
9121 dev->features &= ~NETIF_F_LRO;
9122 bp->flags &= ~TPA_ENABLE_FLAG;
9123 changed = 1;
9124 }
9125
9126 if (changed && netif_running(dev)) {
9127 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9128 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9129 }
9130
9131 return rc;
9132}
9133
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009134static u32 bnx2x_get_rx_csum(struct net_device *dev)
9135{
9136 struct bnx2x *bp = netdev_priv(dev);
9137
9138 return bp->rx_csum;
9139}
9140
9141static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9142{
9143 struct bnx2x *bp = netdev_priv(dev);
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009144 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009145
9146 bp->rx_csum = data;
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009147
9148 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9149 TPA'ed packets will be discarded due to wrong TCP CSUM */
9150 if (!data) {
9151 u32 flags = ethtool_op_get_flags(dev);
9152
9153 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9154 }
9155
9156 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009157}
9158
9159static int bnx2x_set_tso(struct net_device *dev, u32 data)
9160{
Eilon Greenstein755735eb2008-06-23 20:35:13 -07009161 if (data) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009162 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735eb2008-06-23 20:35:13 -07009163 dev->features |= NETIF_F_TSO6;
9164 } else {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009165 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735eb2008-06-23 20:35:13 -07009166 dev->features &= ~NETIF_F_TSO6;
9167 }
9168
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009169 return 0;
9170}
9171
/* Names reported to ethtool for the BNX2X_NUM_TESTS self-tests;
 * "(offline)" entries require taking the interface down. */
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
9183
/* ethtool self_test_count callback: number of self-test result slots
 * (matches bnx2x_tests_str_arr). */
static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}
9188
/* Self-test: verify selected chip registers are read/writable.
 *
 * Each reg_tbl entry holds the port-0 offset, the per-port stride
 * (offset1) and a mask of implemented bits.  Every register is
 * written first with 0x00000000 and then 0xffffffff; the read-back
 * must match under the mask.  The original value is restored after
 * each probe.
 *
 * Returns 0 on success, -ENODEV if the interface is down or any
 * register fails the read-back comparison.
 */
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			/* select this port's copy of the register */
			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
9282
/* Self-test: sweep-read the internal memories and then verify that
 * no unexpected parity-status bits are set.
 *
 * mem_tbl lists each memory's base offset and size in dwords;
 * prty_tbl lists the parity-status registers with per-chip (E1/E1H)
 * masks of bits that are allowed to be set.
 *
 * Returns 0 on success, -ENODEV if the interface is down or a
 * parity-status register has disallowed bits set.
 */
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
9341
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -07009342static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9343{
9344 int cnt = 1000;
9345
9346 if (link_up)
9347 while (bnx2x_link_test(bp) && cnt--)
9348 msleep(10);
9349}
9350
/* Run one loopback self-test iteration on fastpath queue 0.
 *
 * @loopback_mode: BNX2X_PHY_LOOPBACK or BNX2X_MAC_LOOPBACK.
 * @link_up:       unused here; kept for the caller's interface.
 *
 * Builds a single test frame (our own MAC as destination, payload a
 * counting byte pattern), posts it on the Tx ring with a doorbell,
 * then checks that exactly one packet was consumed on Tx and one
 * arrived on Rx with matching length and payload.
 *
 * Returns 0 if the frame made the round trip intact, -EINVAL for an
 * unusable mode, -ENOMEM on skb allocation failure, -ENODEV on any
 * data-path mismatch.  The memory-barrier/doorbell sequence follows
 * the FW-mandated producer-update ordering and must not be altered.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		/* PHY loopback only valid when link_params already use it */
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	/* payload: recognizable counting pattern, re-checked on Rx */
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	/* fill a single buffer descriptor for the whole frame */
	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* BD must be visible before the producer updates below */
	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	/* give the chip time to loop the frame back */
	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* verify the completion entry is a clean fastpath CQE */
	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* compare the received payload against the pattern we sent */
	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* consume the Rx entry we just inspected */
	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
9477
/* Self-test: run both PHY and MAC loopback iterations with the
 * datapath quiesced and the PHY lock held.
 *
 * Returns 0 on success or a bitmask of
 * BNX2X_{PHY,MAC}_LOOPBACK_FAILED flags (BNX2X_LOOPBACK_FAILED when
 * the interface is not running at all).
 */
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	/* stop NAPI/interrupt processing while we drive the rings by hand */
	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}
9505
9506#define CRC32_RESIDUAL 0xdebb20e3
9507
/* Self-test: validate NVRAM contents.
 *
 * Checks the bootstrap magic (0x669955aa) and then verifies the CRC
 * of each directory region listed in nvram_tbl (every region stores
 * its own CRC, so a correct region yields the CRC32_RESIDUAL
 * constant when summed with ether_crc_le()).
 *
 * Returns 0 on success, a read-helper errno, or -ENODEV on a magic
 * or checksum mismatch.
 */
static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];	/* sized for the largest region above */
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
9564
/* Self-test: verify slowpath completion delivery.
 *
 * Posts a zero-length SET_MAC ramrod (length 0 in the header, so no MAC
 * entries are actually programmed) and waits up to ~100ms for the
 * completion to clear bp->set_mac_pending.  NOTE(review): the flag is
 * cleared by the completion handler elsewhere in the file — confirm.
 *
 * Returns 0 on success, -ENODEV if the device is down or the completion
 * never arrives, or the bnx2x_sp_post() error code.
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* zero entries: exercises the ramrod path without side effects */
	config->hdr.length = 0;
	/* E1 uses a per-port offset; E1H addresses by function number */
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		/* poll in 10ms steps, up to 10 times, for the completion */
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
9597
/* ethtool self-test entry point.
 *
 * Result slots in buf[] (non-zero == failed):
 *   0 registers, 1 memory, 2 loopback (offline only),
 *   3 nvram, 4 interrupt, 5 link (PMF only).
 * Failures also set ETH_TEST_FL_FAILED in etest->flags.
 *
 * Offline tests reload the chip in diagnostic mode and restore normal
 * operation afterwards; they are disabled in E1H multi-function mode.
 */
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		/* remember the link state so it can be re-awaited below */
		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* loopback result is reported directly (may be a bitmask) */
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* back to normal operation */
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	/* online tests run in either case */
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* only the PMF runs the link test */
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
9656
/* Per-queue ethtool statistics descriptors.
 *
 * offset - position of the counter inside struct eth_q_stats, expressed
 *          in u32 units via Q_STATS_OFFSET32 (the copy loop indexes a
 *          u32 pointer with it);
 * size   - counter width in bytes: 8 = hi/lo pair of u32s, 4 = single
 *          u32, 0 = placeholder that is reported as zero;
 * string - sprintf template for the name; "%d" receives the queue index.
 */
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};
9682
/* Global (function/port) ethtool statistics descriptors.
 *
 * offset - position inside struct bnx2x_eth_stats in u32 units
 *          (STATS_OFFSET32); size - 8 = hi/lo u32 pair, 4 = single u32;
 * flags  - whether the counter is maintained per port, per function, or
 *          both (PORT-only counters are hidden in E1H MF mode, see
 *          IS_PORT_STAT / IS_E1HMF_MODE_STAT users below).
 */
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
9776
/* Classification helpers over bnx2x_stats_arr[i].flags:
 * IS_PORT_STAT - counter exists only per port (flags == PORT exactly);
 * IS_FUNC_STAT - counter is maintained per function;
 * IS_E1HMF_MODE_STAT - in E1H multi-function mode (unless the stats
 * debug message level is set) port-only counters are suppressed.
 */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
Yitchak Gertner66e855f2008-08-13 15:49:05 -07009782
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009783static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9784{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009785 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +00009786 int i, j, k;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009787
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009788 switch (stringset) {
9789 case ETH_SS_STATS:
Eilon Greensteinde832a52009-02-12 08:36:33 +00009790 if (is_multi(bp)) {
9791 k = 0;
9792 for_each_queue(bp, i) {
9793 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9794 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9795 bnx2x_q_stats_arr[j].string, i);
9796 k += BNX2X_NUM_Q_STATS;
9797 }
9798 if (IS_E1HMF_MODE_STAT(bp))
9799 break;
9800 for (j = 0; j < BNX2X_NUM_STATS; j++)
9801 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9802 bnx2x_stats_arr[j].string);
9803 } else {
9804 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9805 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9806 continue;
9807 strcpy(buf + j*ETH_GSTRING_LEN,
9808 bnx2x_stats_arr[i].string);
9809 j++;
9810 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009811 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009812 break;
9813
9814 case ETH_SS_TEST:
9815 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9816 break;
9817 }
9818}
9819
9820static int bnx2x_get_stats_count(struct net_device *dev)
9821{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009822 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +00009823 int i, num_stats;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009824
Eilon Greensteinde832a52009-02-12 08:36:33 +00009825 if (is_multi(bp)) {
9826 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9827 if (!IS_E1HMF_MODE_STAT(bp))
9828 num_stats += BNX2X_NUM_STATS;
9829 } else {
9830 if (IS_E1HMF_MODE_STAT(bp)) {
9831 num_stats = 0;
9832 for (i = 0; i < BNX2X_NUM_STATS; i++)
9833 if (IS_FUNC_STAT(i))
9834 num_stats++;
9835 } else
9836 num_stats = BNX2X_NUM_STATS;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009837 }
Eilon Greensteinde832a52009-02-12 08:36:33 +00009838
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009839 return num_stats;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009840}
9841
9842static void bnx2x_get_ethtool_stats(struct net_device *dev,
9843 struct ethtool_stats *stats, u64 *buf)
9844{
9845 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +00009846 u32 *hw_stats, *offset;
9847 int i, j, k;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009848
Eilon Greensteinde832a52009-02-12 08:36:33 +00009849 if (is_multi(bp)) {
9850 k = 0;
9851 for_each_queue(bp, i) {
9852 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9853 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9854 if (bnx2x_q_stats_arr[j].size == 0) {
9855 /* skip this counter */
9856 buf[k + j] = 0;
9857 continue;
9858 }
9859 offset = (hw_stats +
9860 bnx2x_q_stats_arr[j].offset);
9861 if (bnx2x_q_stats_arr[j].size == 4) {
9862 /* 4-byte counter */
9863 buf[k + j] = (u64) *offset;
9864 continue;
9865 }
9866 /* 8-byte counter */
9867 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9868 }
9869 k += BNX2X_NUM_Q_STATS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009870 }
Eilon Greensteinde832a52009-02-12 08:36:33 +00009871 if (IS_E1HMF_MODE_STAT(bp))
9872 return;
9873 hw_stats = (u32 *)&bp->eth_stats;
9874 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9875 if (bnx2x_stats_arr[j].size == 0) {
9876 /* skip this counter */
9877 buf[k + j] = 0;
9878 continue;
9879 }
9880 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9881 if (bnx2x_stats_arr[j].size == 4) {
9882 /* 4-byte counter */
9883 buf[k + j] = (u64) *offset;
9884 continue;
9885 }
9886 /* 8-byte counter */
9887 buf[k + j] = HILO_U64(*offset, *(offset + 1));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009888 }
Eilon Greensteinde832a52009-02-12 08:36:33 +00009889 } else {
9890 hw_stats = (u32 *)&bp->eth_stats;
9891 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9892 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9893 continue;
9894 if (bnx2x_stats_arr[i].size == 0) {
9895 /* skip this counter */
9896 buf[j] = 0;
9897 j++;
9898 continue;
9899 }
9900 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9901 if (bnx2x_stats_arr[i].size == 4) {
9902 /* 4-byte counter */
9903 buf[j] = (u64) *offset;
9904 j++;
9905 continue;
9906 }
9907 /* 8-byte counter */
9908 buf[j] = HILO_U64(*offset, *(offset + 1));
9909 j++;
9910 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009911 }
9912}
9913
/* ethtool phys_id: identify the adapter by blinking the port LED.
 *
 * @data: number of seconds to blink (0 means the default of 2).
 * Toggles the LED at 2Hz (500ms on, 500ms off), bailing out early if the
 * caller is interrupted by a signal, then restores the operational LED
 * state when link is up.  Silently does nothing when the interface is
 * down or this function is not the PMF (which drives the LEDs).
 */
static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;	/* default blink duration: 2 seconds */

	/* data * 2 half-second intervals: even = on, odd = off */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	/* restore the normal (link-speed-based) LED behavior */
	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}
9952
/* ethtool callback table: device-specific handlers above, with generic
 * ethtool_op_* helpers where no bnx2x-specific handling is needed.
 */
static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};
9989
9990/* end of ethtool_ops */
9991
9992/****************************************************************************
9993* General service functions
9994****************************************************************************/
9995
/* Move the device between PCI power states via the PM capability's
 * control/status register (PM_CTRL).
 *
 * @state: PCI_D0 or PCI_D3hot; anything else returns -EINVAL.
 * Entering D0 also clears a pending PME status; entering D3hot arms PME
 * generation when Wake-on-LAN is enabled.  Returns 0 on success.
 */
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		/* clear the state field (-> D0) and ack PME status
		 * (the status bit is write-1-to-clear) */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;	/* 3 == D3hot encoding of the state field */

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
10033
Eilon Greenstein237907c2009-01-14 06:42:44 +000010034static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10035{
10036 u16 rx_cons_sb;
10037
10038 /* Tell compiler that status block fields can change */
10039 barrier();
10040 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10041 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10042 rx_cons_sb++;
10043 return (fp->rx_comp_cons != rx_cons_sb);
10044}
10045
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010046/*
10047 * net_device service functions
10048 */
10049
/* NAPI poll handler for one fastpath (RX/TX ring pair).
 *
 * Services TX completions, then RX up to @budget packets.  NAPI rules:
 * the poll must NOT call napi_complete() if the full budget was
 * consumed; otherwise it re-enables the queue's interrupt via the IGU
 * ack once no work remains.
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;	/* complete immediately, no work */
#endif

	/* warm the cache lines the RX/TX paths touch first */
	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	/* snapshot the status block indices before checking for work */
	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* BNX2X_HAS_WORK() reads the status block, thus we need to
	 * ensure that status block indices have been actually read
	 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
	 * so that we won't write the "newer" value of the status block to IGU
	 * (if there was a DMA right after BNX2X_HAS_WORK and
	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
	 * may be postponed to right before bnx2x_ack_sb). In this case
	 * there will never be another interrupt until there is another update
	 * of the status block, while there is still unhandled work.
	 */
	rmb();

	if (!BNX2X_HAS_WORK(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		/* ack the status block indices and re-enable the interrupt */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}
10106
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010107
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 *
 * @tx_bd:   in/out - on entry the header BD; on exit the new data BD so
 *           the caller can keep chaining/flag-marking from it
 * @hlen:    header length in bytes kept in the first BD
 * @bd_prod: current BD producer index
 * @nbd:     total BD count to record in the (start) header BD
 * Returns the advanced BD producer index.
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	/* data BD reuses the header BD's DMA mapping, offset by hlen */
	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}
10157
10158static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10159{
10160 if (fix > 0)
10161 csum = (u16) ~csum_fold(csum_sub(csum,
10162 csum_partial(t_header - fix, fix, 0)));
10163
10164 else if (fix < 0)
10165 csum = (u16) ~csum_fold(csum_add(csum,
10166 csum_partial(t_header, -fix, 0)));
10167
10168 return swab16(csum);
10169}
10170
10171static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10172{
10173 u32 rc;
10174
10175 if (skb->ip_summed != CHECKSUM_PARTIAL)
10176 rc = XMIT_PLAIN;
10177
10178 else {
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010179 if (skb->protocol == htons(ETH_P_IPV6)) {
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010180 rc = XMIT_CSUM_V6;
10181 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10182 rc |= XMIT_CSUM_TCP;
10183
10184 } else {
10185 rc = XMIT_CSUM_V4;
10186 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10187 rc |= XMIT_CSUM_TCP;
10188 }
10189 }
10190
10191 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10192 rc |= XMIT_GSO_V4;
10193
10194 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10195 rc |= XMIT_GSO_V6;
10196
10197 return rc;
10198}
10199
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
/* NOTE(review): for LSO the check slides a window of (MAX_FETCH_BD - 3)
   consecutive fragments over the packet and appears to demand that each
   window holds at least one full MSS — confirm against the FW interface
   spec.  Returns 1 if the skb must be linearized, 0 otherwise. */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				/* slide the window: add the entering frag,
				   then (below) drop the leaving one */
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010280
10281/* called with netif_tx_lock
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010282 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010283 * netif_wake_queue()
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010284 */
10285static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10286{
10287 struct bnx2x *bp = netdev_priv(dev);
10288 struct bnx2x_fastpath *fp;
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010289 struct netdev_queue *txq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010290 struct sw_tx_bd *tx_buf;
10291 struct eth_tx_bd *tx_bd;
10292 struct eth_tx_parse_bd *pbd = NULL;
10293 u16 pkt_prod, bd_prod;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010294 int nbd, fp_index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010295 dma_addr_t mapping;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010296 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10297 int vlan_off = (bp->e1hov ? 4 : 0);
10298 int i;
10299 u8 hlen = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010300
10301#ifdef BNX2X_STOP_ON_ERROR
10302 if (unlikely(bp->panic))
10303 return NETDEV_TX_BUSY;
10304#endif
10305
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010306 fp_index = skb_get_queue_mapping(skb);
10307 txq = netdev_get_tx_queue(dev, fp_index);
10308
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010309 fp = &bp->fp[fp_index];
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010310
Yitchak Gertner231fd582008-08-25 15:27:06 -070010311 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
Eilon Greensteinde832a52009-02-12 08:36:33 +000010312 fp->eth_q_stats.driver_xoff++,
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010313 netif_tx_stop_queue(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010314 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10315 return NETDEV_TX_BUSY;
10316 }
10317
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010318 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10319 " gso type %x xmit_type %x\n",
10320 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10321 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10322
Eilon Greenstein632da4d2009-01-14 06:44:10 +000010323#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
Eilon Greensteinf5372252009-02-12 08:38:30 +000010324 /* First, check if we need to linearize the skb (due to FW
10325 restrictions). No need to check fragmentation if page size > 8K
10326 (there will be no violation to FW restrictions) */
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010327 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10328 /* Statistics of linearization */
10329 bp->lin_cnt++;
10330 if (skb_linearize(skb) != 0) {
10331 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10332 "silently dropping this SKB\n");
10333 dev_kfree_skb_any(skb);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -070010334 return NETDEV_TX_OK;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010335 }
10336 }
Eilon Greenstein632da4d2009-01-14 06:44:10 +000010337#endif
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010338
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010339 /*
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010340 Please read carefully. First we use one BD which we mark as start,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010341 then for TSO or xsum we have a parsing info BD,
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010342 and only then we have the rest of the TSO BDs.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010343 (don't forget to mark the last one as last,
10344 and to unmap only AFTER you write to the BD ...)
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010345 And above all, all pdb sizes are in words - NOT DWORDS!
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010346 */
10347
10348 pkt_prod = fp->tx_pkt_prod++;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010349 bd_prod = TX_BD(fp->tx_bd_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010350
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010351 /* get a tx_buf and first BD */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010352 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10353 tx_bd = &fp->tx_desc_ring[bd_prod];
10354
10355 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10356 tx_bd->general_data = (UNICAST_ADDRESS <<
10357 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
Eilon Greenstein3196a882008-08-13 15:58:49 -070010358 /* header nbd */
10359 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010360
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010361 /* remember the first BD of the packet */
10362 tx_buf->first_bd = fp->tx_bd_prod;
10363 tx_buf->skb = skb;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010364
10365 DP(NETIF_MSG_TX_QUEUED,
10366 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10367 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10368
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080010369#ifdef BCM_VLAN
10370 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10371 (bp->flags & HW_VLAN_TX_FLAG)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010372 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10373 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010374 vlan_off += 4;
10375 } else
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080010376#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010377 tx_bd->vlan = cpu_to_le16(pkt_prod);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010378
10379 if (xmit_type) {
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010380 /* turn on parsing and get a BD */
10381 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10382 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10383
10384 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10385 }
10386
10387 if (xmit_type & XMIT_CSUM) {
10388 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10389
10390 /* for now NS flag is not used in Linux */
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010391 pbd->global_data =
10392 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10393 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010394
10395 pbd->ip_hlen = (skb_transport_header(skb) -
10396 skb_network_header(skb)) / 2;
10397
10398 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10399
10400 pbd->total_hlen = cpu_to_le16(hlen);
10401 hlen = hlen*2 - vlan_off;
10402
10403 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10404
10405 if (xmit_type & XMIT_CSUM_V4)
10406 tx_bd->bd_flags.as_bitfield |=
10407 ETH_TX_BD_FLAGS_IP_CSUM;
10408 else
10409 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10410
10411 if (xmit_type & XMIT_CSUM_TCP) {
10412 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10413
10414 } else {
10415 s8 fix = SKB_CS_OFF(skb); /* signed! */
10416
10417 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10418 pbd->cs_offset = fix / 2;
10419
10420 DP(NETIF_MSG_TX_QUEUED,
10421 "hlen %d offset %d fix %d csum before fix %x\n",
10422 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10423 SKB_CS(skb));
10424
10425 /* HW bug: fixup the CSUM */
10426 pbd->tcp_pseudo_csum =
10427 bnx2x_csum_fix(skb_transport_header(skb),
10428 SKB_CS(skb), fix);
10429
10430 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10431 pbd->tcp_pseudo_csum);
10432 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010433 }
10434
10435 mapping = pci_map_single(bp->pdev, skb->data,
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010436 skb_headlen(skb), PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010437
10438 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10439 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eilon Greenstein6378c022008-08-13 15:59:25 -070010440 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010441 tx_bd->nbd = cpu_to_le16(nbd);
10442 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10443
10444 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010445 " nbytes %d flags %x vlan %x\n",
10446 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10447 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10448 le16_to_cpu(tx_bd->vlan));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010449
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010450 if (xmit_type & XMIT_GSO) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010451
10452 DP(NETIF_MSG_TX_QUEUED,
10453 "TSO packet len %d hlen %d total len %d tso size %d\n",
10454 skb->len, hlen, skb_headlen(skb),
10455 skb_shinfo(skb)->gso_size);
10456
10457 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10458
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010459 if (unlikely(skb_headlen(skb) > hlen))
10460 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10461 bd_prod, ++nbd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010462
10463 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10464 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010465 pbd->tcp_flags = pbd_tcp_flags(skb);
10466
10467 if (xmit_type & XMIT_GSO_V4) {
10468 pbd->ip_id = swab16(ip_hdr(skb)->id);
10469 pbd->tcp_pseudo_csum =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010470 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10471 ip_hdr(skb)->daddr,
10472 0, IPPROTO_TCP, 0));
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010473
10474 } else
10475 pbd->tcp_pseudo_csum =
10476 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10477 &ipv6_hdr(skb)->daddr,
10478 0, IPPROTO_TCP, 0));
10479
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010480 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10481 }
10482
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010483 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10484 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010485
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010486 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10487 tx_bd = &fp->tx_desc_ring[bd_prod];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010488
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010489 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10490 frag->size, PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010491
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010492 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10493 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10494 tx_bd->nbytes = cpu_to_le16(frag->size);
10495 tx_bd->vlan = cpu_to_le16(pkt_prod);
10496 tx_bd->bd_flags.as_bitfield = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010497
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010498 DP(NETIF_MSG_TX_QUEUED,
10499 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10500 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10501 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010502 }
10503
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010504 /* now at last mark the BD as the last BD */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010505 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10506
10507 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10508 tx_bd, tx_bd->bd_flags.as_bitfield);
10509
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010510 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10511
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010512 /* now send a tx doorbell, counting the next BD
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010513 * if the packet contains or ends with it
10514 */
10515 if (TX_BD_POFF(bd_prod) < nbd)
10516 nbd++;
10517
10518 if (pbd)
10519 DP(NETIF_MSG_TX_QUEUED,
10520 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10521 " tcp_flags %x xsum %x seq %u hlen %u\n",
10522 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10523 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010524 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010525
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010526 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010527
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -080010528 /*
10529 * Make sure that the BD data is updated before updating the producer
10530 * since FW might read the BD right after the producer is updated.
10531 * This is only applicable for weak-ordered memory model archs such
10532 * as IA-64. The following barrier is also mandatory since FW will
10533 * assumes packets must have BDs.
10534 */
10535 wmb();
10536
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010537 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010538 mb(); /* FW restriction: must not reorder writing nbd and packets */
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010539 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
Eilon Greenstein0626b892009-02-12 08:38:14 +000010540 DOORBELL(bp, fp->index, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010541
10542 mmiowb();
10543
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010544 fp->tx_bd_prod += nbd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010545 dev->trans_start = jiffies;
10546
10547 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -080010548 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10549 if we put Tx into XOFF state. */
10550 smp_mb();
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010551 netif_tx_stop_queue(txq);
Eilon Greensteinde832a52009-02-12 08:36:33 +000010552 fp->eth_q_stats.driver_xoff++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010553 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010554 netif_tx_wake_queue(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010555 }
10556 fp->tx_pkt++;
10557
10558 return NETDEV_TX_OK;
10559}
10560
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010561/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010562static int bnx2x_open(struct net_device *dev)
10563{
10564 struct bnx2x *bp = netdev_priv(dev);
10565
Eilon Greenstein6eccabb2009-01-22 03:37:48 +000010566 netif_carrier_off(dev);
10567
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010568 bnx2x_set_power_state(bp, PCI_D0);
10569
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010570 return bnx2x_nic_load(bp, LOAD_OPEN);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010571}
10572
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010573/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010574static int bnx2x_close(struct net_device *dev)
10575{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010576 struct bnx2x *bp = netdev_priv(dev);
10577
10578 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010579 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10580 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10581 if (!CHIP_REV_IS_SLOW(bp))
10582 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010583
10584 return 0;
10585}
10586
/* called with netif_tx_lock from dev_mcast.c */
/*
 * Compute the Rx filtering mode from dev->flags and program the multicast
 * filters.  E1 chips use a CAM table configured through a SET_MAC ramrod;
 * E1H chips use a 256-bit CRC hash filter written directly to registers.
 */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* nothing to program while the NIC is not fully up */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	/* E1 falls back to all-multi when the list exceeds its CAM size */
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			/* copy each multicast MAC into a CAM entry;
			   MAC halves are byte-swapped for the HW */
			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			/* invalidate stale entries left over from a
			   previously larger list */
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			/* CAM offset depends on port and emulation mode */
			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			/* hand the table to FW via slowpath ramrod */
			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			/* hash each address: top CRC byte selects one of
			   256 filter bits (32-bit register + bit index) */
			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	/* finally push the accept mode to the storm FW */
	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
10706
10707/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010708static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10709{
10710 struct sockaddr *addr = p;
10711 struct bnx2x *bp = netdev_priv(dev);
10712
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010713 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010714 return -EINVAL;
10715
10716 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010717 if (netif_running(dev)) {
10718 if (CHIP_IS_E1(bp))
Yitchak Gertner3101c2b2008-08-13 15:52:28 -070010719 bnx2x_set_mac_addr_e1(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010720 else
Yitchak Gertner3101c2b2008-08-13 15:52:28 -070010721 bnx2x_set_mac_addr_e1h(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010722 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010723
10724 return 0;
10725}
10726
/* called with rtnl_lock */
/*
 * MII ioctl handler (.ndo_do_ioctl): exposes the PHY over the standard
 * SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG interface.  Register accesses go
 * through clause-45 MDIO on DEFAULT_PHY_DEV_ADDR, serialized by the
 * per-port phy_mutex.
 */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		/* MDIO requires the device (and its clocks) to be up */
		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		/* writes can reconfigure the PHY - admin only */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}
10777
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010778/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010779static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10780{
10781 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010782 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010783
10784 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10785 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10786 return -EINVAL;
10787
10788 /* This does not race with packet allocation
Eliezer Tamirc14423f2008-02-28 11:49:42 -080010789 * because the actual alloc size is
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010790 * only updated as part of load
10791 */
10792 dev->mtu = new_mtu;
10793
10794 if (netif_running(dev)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010795 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10796 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010797 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010798
10799 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010800}
10801
/*
 * .ndo_tx_timeout callback: the stack detected a stalled Tx queue.
 * Defer recovery to the reset work item rather than resetting inline.
 */
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	/* debug builds: freeze the driver state for inspection instead */
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
10813
10814#ifdef BCM_VLAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010815/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010816static void bnx2x_vlan_rx_register(struct net_device *dev,
10817 struct vlan_group *vlgrp)
10818{
10819 struct bnx2x *bp = netdev_priv(dev);
10820
10821 bp->vlgrp = vlgrp;
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080010822
10823 /* Set flags according to the required capabilities */
10824 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10825
10826 if (dev->features & NETIF_F_HW_VLAN_TX)
10827 bp->flags |= HW_VLAN_TX_FLAG;
10828
10829 if (dev->features & NETIF_F_HW_VLAN_RX)
10830 bp->flags |= HW_VLAN_RX_FLAG;
10831
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010832 if (netif_running(dev))
Eliezer Tamir49d66772008-02-28 11:53:13 -080010833 bnx2x_set_client_config(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010834}
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010835
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010836#endif
10837
10838#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/*
 * netpoll hook (.ndo_poll_controller): run the interrupt handler by hand
 * with the device IRQ masked, for netconsole/kgdboe style polling.
 */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
10847#endif
10848
/* net_device callbacks; hooked up to the netdev in bnx2x_init_dev() */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
10866
/*
 * Low-level PCI/netdev setup for one function: enable the PCI device,
 * validate and map BAR0 (registers) and BAR2 (doorbells), configure DMA
 * masks, clean chip indirection registers and populate netdev ops/features.
 * On failure everything acquired so far is unwound via the goto chain.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 must be a memory BAR (register window) */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* BAR2 must be a memory BAR (doorbell window) */
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* only the first function to enable the device claims the
	   regions and saves PCI state */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* prefer 64-bit DMA; fall back to 32-bit, else fail */
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* map at most BNX2X_DB_SIZE of the doorbell BAR */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
11015
Eliezer Tamir25047952008-02-28 11:50:16 -080011016static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11017{
11018 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11019
11020 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11021 return val;
11022}
11023
11024/* return value of 1=2.5GHz 2=5GHz */
11025static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11026{
11027 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11028
11029 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11030 return val;
11031}
11032
/*
 * PCI probe entry point: allocate the multi-queue netdev, run the
 * low-level PCI/BAR setup (bnx2x_init_dev), initialize driver state
 * (bnx2x_init_bp), register the netdev and print a one-line summary.
 * Failures after bnx2x_init_dev() unwind through init_one_exit.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;	/* print the banner only once */
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;	/* module parameter */

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		/* init_dev cleaned up after itself; only the netdev is ours */
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
11098
/* PCI remove callback: unregister the interface and release, in reverse
 * order of acquisition, everything set up by bnx2x_init_one().
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		/* drvdata should have been set by the probe; bail out */
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	/* Closes the interface (if up) before we tear anything down */
	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* Release regions only if we are the sole enabler of the device */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
11126
/* PM suspend callback: save PCI state and, if the interface is up,
 * unload the NIC and drop it to the low-power state chosen for @state.
 * The whole sequence runs under the RTNL to serialize against
 * concurrent open/close.  Returns 0.
 */
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	/* Nothing more to do if the interface is down */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	/* UNLOAD_CLOSE: shut the NIC down as a regular close would */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
11157
/* PM resume callback: restore PCI state and, if the interface was up,
 * return the chip to D0 and reload the NIC.  Runs under the RTNL to
 * serialize against concurrent open/close.  Returns 0 or the
 * bnx2x_nic_load() result.
 */
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	/* Nothing more to do if the interface is down */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	/* LOAD_OPEN: bring the NIC up as a regular open would */
	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
11188
/* Teardown path used by the PCI error handler (EEH): stop the interface,
 * release IRQs, invalidate the E1 multicast CAM table, and free all
 * driver-owned resources (SKBs, SGEs, NAPI contexts, DMA memory).
 * NOTE(review): unlike a normal unload, no firmware/chip shutdown
 * commands are issued here — presumably because the device may be
 * inaccessible after the bus error; confirm against bnx2x_nic_unload().
 * Always returns 0.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* E1 chips keep the multicast filter in a CAM shadowed in the
	 * slowpath area; mark every entry invalid.
	 */
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
11228
/* Re-establish contact with the management CPU (MCP) after a PCI reset:
 * re-read the shared-memory base, validate it, check the MCP validity
 * signature and re-sync the driver/firmware mailbox sequence number.
 * If the shared-memory base is outside the expected window the MCP is
 * considered inactive and NO_MCP_FLAG is set.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* A valid shmem base lies in [0xA0000, 0xC0000); anything else
	 * means the MCP is not running.
	 */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	/* Resume the mailbox handshake from the firmware's sequence */
	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
11258
Wendy Xiong493adb12008-06-23 20:36:22 -070011259/**
11260 * bnx2x_io_error_detected - called when PCI error is detected
11261 * @pdev: Pointer to PCI device
11262 * @state: The current pci connection state
11263 *
11264 * This function is called after a PCI bus error affecting
11265 * this device has been detected.
11266 */
11267static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11268 pci_channel_state_t state)
11269{
11270 struct net_device *dev = pci_get_drvdata(pdev);
11271 struct bnx2x *bp = netdev_priv(dev);
11272
11273 rtnl_lock();
11274
11275 netif_device_detach(dev);
11276
11277 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070011278 bnx2x_eeh_nic_unload(bp);
Wendy Xiong493adb12008-06-23 20:36:22 -070011279
11280 pci_disable_device(pdev);
11281
11282 rtnl_unlock();
11283
11284 /* Request a slot reset */
11285 return PCI_ERS_RESULT_NEED_RESET;
11286}
11287
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	/* Re-enable the device that error_detected() disabled */
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Power up the chip only if the interface was running; the
	 * actual NIC reload happens later in bnx2x_io_resume().
	 */
	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
11318
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	/* Re-sync shmem base and MCP mailbox state after the reset */
	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
11342
/* PCI error-recovery (EEH/AER) callbacks for this driver */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};
11348
/* Driver registration table: probe/remove, power management and
 * error-recovery entry points for all supported device IDs.
 */
static struct pci_driver bnx2x_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2x_pci_tbl,
	.probe = bnx2x_init_one,
	.remove = __devexit_p(bnx2x_remove_one),
	.suspend = bnx2x_suspend,
	.resume = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
11358
11359static int __init bnx2x_init(void)
11360{
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080011361 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11362 if (bnx2x_wq == NULL) {
11363 printk(KERN_ERR PFX "Cannot create workqueue\n");
11364 return -ENOMEM;
11365 }
11366
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011367 return pci_register_driver(&bnx2x_pci_driver);
11368}
11369
/* Module exit point: unregister the PCI driver first (so no probe or
 * callback can queue new work), then destroy the workqueue.
 */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
11379