blob: f20df6d7dcb5f3da06b71ad78c650f5b65875422 [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Eilon Greensteind05c26c2009-01-17 23:26:13 -08003 * Copyright (c) 2007-2009 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080041#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070045#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020046#include <linux/workqueue.h>
47#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070048#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020049#include <linux/prefetch.h>
50#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020051#include <linux/io.h>
52
Eilon Greenstein359d8b12009-02-12 08:38:25 +000053
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020054#include "bnx2x.h"
55#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070056#include "bnx2x_init_ops.h"
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000057#include "bnx2x_dump.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020058
Vladislav Zolotarov56ed4352009-04-27 03:28:25 -070059#define DRV_MODULE_VERSION "1.48.105-1"
60#define DRV_MODULE_RELDATE "2009/04/22"
Eilon Greenstein34f80b02008-06-23 20:33:01 -070061#define BNX2X_BC_VER 0x040200
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020062
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070063#include <linux/firmware.h>
64#include "bnx2x_fw_file_hdr.h"
65/* FW files */
66#define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67#define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
68
Eilon Greenstein34f80b02008-06-23 20:33:01 -070069/* Time in jiffies before concluding the transmitter is hung */
70#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020071
Andrew Morton53a10562008-02-09 23:16:41 -080072static char version[] __devinitdata =
Eilon Greenstein34f80b02008-06-23 20:33:01 -070073 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020074 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
Eilon Greenstein24e3fce2008-06-12 14:30:28 -070076MODULE_AUTHOR("Eliezer Tamir");
Eilon Greensteine47d7e62009-01-14 06:44:28 +000077MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020078MODULE_LICENSE("GPL");
79MODULE_VERSION(DRV_MODULE_VERSION);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020080
Eilon Greenstein555f6c72009-02-12 08:36:11 +000081static int multi_mode = 1;
82module_param(multi_mode, int, 0);
Eilon Greenstein2059aba2009-03-02 07:59:48 +000083MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");
Eilon Greenstein555f6c72009-02-12 08:36:11 +000084
Eilon Greenstein19680c42008-08-13 15:47:33 -070085static int disable_tpa;
Eilon Greenstein19680c42008-08-13 15:47:33 -070086module_param(disable_tpa, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +000087MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
Eilon Greenstein8badd272009-02-12 08:36:15 +000088
89static int int_mode;
90module_param(int_mode, int, 0);
91MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
92
Eilon Greenstein9898f862009-02-12 08:38:27 +000093static int poll;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020094module_param(poll, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +000095MODULE_PARM_DESC(poll, " Use polling (for debug)");
Eilon Greenstein8d5726c2009-02-12 08:37:19 +000096
97static int mrrs = -1;
98module_param(mrrs, int, 0);
99MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
100
Eilon Greenstein9898f862009-02-12 08:38:27 +0000101static int debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200102module_param(debug, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000103MODULE_PARM_DESC(debug, " Default debug msglevel");
104
105static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200106
Eilon Greenstein1cf167f2009-01-14 21:22:18 -0800107static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200108
109enum bnx2x_board_type {
110 BCM57710 = 0,
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700111 BCM57711 = 1,
112 BCM57711E = 2,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200113};
114
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700115/* indexed by board_type, above */
Andrew Morton53a10562008-02-09 23:16:41 -0800116static struct {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200117 char *name;
118} board_info[] __devinitdata = {
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700119 { "Broadcom NetXtreme II BCM57710 XGb" },
120 { "Broadcom NetXtreme II BCM57711 XGb" },
121 { "Broadcom NetXtreme II BCM57711E XGb" }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200122};
123
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700124
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200125static const struct pci_device_id bnx2x_pci_tbl[] = {
126 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
127 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700128 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
130 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200132 { 0 }
133};
134
135MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
136
137/****************************************************************************
138* General service functions
139****************************************************************************/
140
141/* used only at init
142 * locking is done by mcp
143 */
144static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
145{
146 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
148 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
149 PCICFG_VENDOR_ID_OFFSET);
150}
151
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200152static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
153{
154 u32 val;
155
156 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
157 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
159 PCICFG_VENDOR_ID_OFFSET);
160
161 return val;
162}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200163
164static const u32 dmae_reg_go_c[] = {
165 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
166 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
167 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
168 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
169};
170
171/* copy command into DMAE command memory and set DMAE command go */
172static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
173 int idx)
174{
175 u32 cmd_offset;
176 int i;
177
178 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
179 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
180 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
181
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700182 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
183 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200184 }
185 REG_WR(bp, dmae_reg_go_c[idx], 1);
186}
187
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700188void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
189 u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200190{
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700191 struct dmae_command *dmae = &bp->init_dmae;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200192 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700193 int cnt = 200;
194
195 if (!bp->dmae_ready) {
196 u32 *data = bnx2x_sp(bp, wb_data[0]);
197
198 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
199 " using indirect\n", dst_addr, len32);
200 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
201 return;
202 }
203
204 mutex_lock(&bp->dmae_mutex);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200205
206 memset(dmae, 0, sizeof(struct dmae_command));
207
208 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
209 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
210 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
211#ifdef __BIG_ENDIAN
212 DMAE_CMD_ENDIANITY_B_DW_SWAP |
213#else
214 DMAE_CMD_ENDIANITY_DW_SWAP |
215#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700216 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
217 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200218 dmae->src_addr_lo = U64_LO(dma_addr);
219 dmae->src_addr_hi = U64_HI(dma_addr);
220 dmae->dst_addr_lo = dst_addr >> 2;
221 dmae->dst_addr_hi = 0;
222 dmae->len = len32;
223 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
224 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700225 dmae->comp_val = DMAE_COMP_VAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200226
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000227 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200228 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
229 "dst_addr [%x:%08x (%08x)]\n"
230 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
231 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
232 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
233 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700234 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200235 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
236 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200237
238 *wb_comp = 0;
239
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700240 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200241
242 udelay(5);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700243
244 while (*wb_comp != DMAE_COMP_VAL) {
245 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
246
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700247 if (!cnt) {
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000248 BNX2X_ERR("DMAE timeout!\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200249 break;
250 }
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700251 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -0700252 /* adjust delay for emulation/FPGA */
253 if (CHIP_REV_IS_SLOW(bp))
254 msleep(100);
255 else
256 udelay(5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200257 }
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700258
259 mutex_unlock(&bp->dmae_mutex);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200260}
261
Yaniv Rosnerc18487e2008-06-23 20:27:52 -0700262void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200263{
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700264 struct dmae_command *dmae = &bp->init_dmae;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200265 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700266 int cnt = 200;
267
268 if (!bp->dmae_ready) {
269 u32 *data = bnx2x_sp(bp, wb_data[0]);
270 int i;
271
272 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
273 " using indirect\n", src_addr, len32);
274 for (i = 0; i < len32; i++)
275 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
276 return;
277 }
278
279 mutex_lock(&bp->dmae_mutex);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200280
281 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
282 memset(dmae, 0, sizeof(struct dmae_command));
283
284 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
285 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
286 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
287#ifdef __BIG_ENDIAN
288 DMAE_CMD_ENDIANITY_B_DW_SWAP |
289#else
290 DMAE_CMD_ENDIANITY_DW_SWAP |
291#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700292 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
293 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200294 dmae->src_addr_lo = src_addr >> 2;
295 dmae->src_addr_hi = 0;
296 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
297 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
298 dmae->len = len32;
299 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
300 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700301 dmae->comp_val = DMAE_COMP_VAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200302
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000303 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200304 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
305 "dst_addr [%x:%08x (%08x)]\n"
306 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
307 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
308 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
309 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200310
311 *wb_comp = 0;
312
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700313 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200314
315 udelay(5);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700316
317 while (*wb_comp != DMAE_COMP_VAL) {
318
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700319 if (!cnt) {
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000320 BNX2X_ERR("DMAE timeout!\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200321 break;
322 }
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700323 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -0700324 /* adjust delay for emulation/FPGA */
325 if (CHIP_REV_IS_SLOW(bp))
326 msleep(100);
327 else
328 udelay(5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200329 }
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700330 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200331 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
332 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700333
334 mutex_unlock(&bp->dmae_mutex);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200335}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200336
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700337/* used only for slowpath so not inlined */
338static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
339{
340 u32 wb_write[2];
341
342 wb_write[0] = val_hi;
343 wb_write[1] = val_lo;
344 REG_WR_DMAE(bp, reg, wb_write, 2);
345}
346
347#ifdef USE_WB_RD
348static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
349{
350 u32 wb_data[2];
351
352 REG_RD_DMAE(bp, reg, wb_data, 2);
353
354 return HILO_U64(wb_data[0], wb_data[1]);
355}
356#endif
357
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200358static int bnx2x_mc_assert(struct bnx2x *bp)
359{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200360 char last_idx;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700361 int i, rc = 0;
362 u32 row0, row1, row2, row3;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200363
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700364 /* XSTORM */
365 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
366 XSTORM_ASSERT_LIST_INDEX_OFFSET);
367 if (last_idx)
368 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200369
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700370 /* print the asserts */
371 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200372
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700373 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
374 XSTORM_ASSERT_LIST_OFFSET(i));
375 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
376 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
377 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
378 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
379 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
380 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200381
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700382 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
383 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
384 " 0x%08x 0x%08x 0x%08x\n",
385 i, row3, row2, row1, row0);
386 rc++;
387 } else {
388 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200389 }
390 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700391
392 /* TSTORM */
393 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
394 TSTORM_ASSERT_LIST_INDEX_OFFSET);
395 if (last_idx)
396 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
397
398 /* print the asserts */
399 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
400
401 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
402 TSTORM_ASSERT_LIST_OFFSET(i));
403 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
404 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
405 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
406 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
407 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
408 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
409
410 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
411 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
412 " 0x%08x 0x%08x 0x%08x\n",
413 i, row3, row2, row1, row0);
414 rc++;
415 } else {
416 break;
417 }
418 }
419
420 /* CSTORM */
421 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
422 CSTORM_ASSERT_LIST_INDEX_OFFSET);
423 if (last_idx)
424 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
425
426 /* print the asserts */
427 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
428
429 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
430 CSTORM_ASSERT_LIST_OFFSET(i));
431 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
432 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
433 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
434 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
435 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
436 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
437
438 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
439 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
440 " 0x%08x 0x%08x 0x%08x\n",
441 i, row3, row2, row1, row0);
442 rc++;
443 } else {
444 break;
445 }
446 }
447
448 /* USTORM */
449 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
450 USTORM_ASSERT_LIST_INDEX_OFFSET);
451 if (last_idx)
452 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
453
454 /* print the asserts */
455 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
456
457 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
458 USTORM_ASSERT_LIST_OFFSET(i));
459 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
460 USTORM_ASSERT_LIST_OFFSET(i) + 4);
461 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
462 USTORM_ASSERT_LIST_OFFSET(i) + 8);
463 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
464 USTORM_ASSERT_LIST_OFFSET(i) + 12);
465
466 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
467 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
468 " 0x%08x 0x%08x 0x%08x\n",
469 i, row3, row2, row1, row0);
470 rc++;
471 } else {
472 break;
473 }
474 }
475
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200476 return rc;
477}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800478
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200479static void bnx2x_fw_dump(struct bnx2x *bp)
480{
481 u32 mark, offset;
Eilon Greenstein4781bfa2009-02-12 08:38:17 +0000482 __be32 data[9];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200483 int word;
484
485 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
Eliezer Tamir49d66772008-02-28 11:53:13 -0800486 mark = ((mark + 0x3) & ~0x3);
487 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200488
489 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
490 for (word = 0; word < 8; word++)
491 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
492 offset + 4*word));
493 data[8] = 0x0;
Eliezer Tamir49d66772008-02-28 11:53:13 -0800494 printk(KERN_CONT "%s", (char *)data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200495 }
496 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
497 for (word = 0; word < 8; word++)
498 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
499 offset + 4*word));
500 data[8] = 0x0;
Eliezer Tamir49d66772008-02-28 11:53:13 -0800501 printk(KERN_CONT "%s", (char *)data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200502 }
503 printk("\n" KERN_ERR PFX "end of fw dump\n");
504}
505
506static void bnx2x_panic_dump(struct bnx2x *bp)
507{
508 int i;
509 u16 j, start, end;
510
Yitchak Gertner66e855f2008-08-13 15:49:05 -0700511 bp->stats_state = STATS_STATE_DISABLED;
512 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
513
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200514 BNX2X_ERR("begin crash dump -----------------\n");
515
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000516 /* Indices */
517 /* Common */
518 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
519 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
520 " spq_prod_idx(%u)\n",
521 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
522 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
523
524 /* Rx */
525 for_each_rx_queue(bp, i) {
526 struct bnx2x_fastpath *fp = &bp->fp[i];
527
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000528 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000529 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
530 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
531 i, fp->rx_bd_prod, fp->rx_bd_cons,
532 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
533 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000534 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000535 " fp_u_idx(%x) *sb_u_idx(%x)\n",
536 fp->rx_sge_prod, fp->last_max_sge,
537 le16_to_cpu(fp->fp_u_idx),
538 fp->status_blk->u_status_block.status_block_index);
539 }
540
541 /* Tx */
542 for_each_tx_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200543 struct bnx2x_fastpath *fp = &bp->fp[i];
544 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
545
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000546 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700547 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200548 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700549 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000550 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000551 " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
Yitchak Gertner66e855f2008-08-13 15:49:05 -0700552 fp->status_blk->c_status_block.status_block_index,
Yitchak Gertner66e855f2008-08-13 15:49:05 -0700553 hw_prods->packets_prod, hw_prods->bds_prod);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000554 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200555
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000556 /* Rings */
557 /* Rx */
558 for_each_rx_queue(bp, i) {
559 struct bnx2x_fastpath *fp = &bp->fp[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200560
561 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
562 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000563 for (j = start; j != end; j = RX_BD(j + 1)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200564 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
565 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
566
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000567 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
568 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200569 }
570
Eilon Greenstein3196a882008-08-13 15:58:49 -0700571 start = RX_SGE(fp->rx_sge_prod);
572 end = RX_SGE(fp->last_max_sge);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000573 for (j = start; j != end; j = RX_SGE(j + 1)) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -0700574 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
575 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
576
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000577 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
578 i, j, rx_sge[1], rx_sge[0], sw_page->page);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -0700579 }
580
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200581 start = RCQ_BD(fp->rx_comp_cons - 10);
582 end = RCQ_BD(fp->rx_comp_cons + 503);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000583 for (j = start; j != end; j = RCQ_BD(j + 1)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200584 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
585
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000586 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
587 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200588 }
589 }
590
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000591 /* Tx */
592 for_each_tx_queue(bp, i) {
593 struct bnx2x_fastpath *fp = &bp->fp[i];
594
595 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
596 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
597 for (j = start; j != end; j = TX_BD(j + 1)) {
598 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
599
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000600 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
601 i, j, sw_bd->skb, sw_bd->first_bd);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000602 }
603
604 start = TX_BD(fp->tx_bd_cons - 10);
605 end = TX_BD(fp->tx_bd_cons + 254);
606 for (j = start; j != end; j = TX_BD(j + 1)) {
607 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
608
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000609 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
610 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000611 }
612 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200613
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700614 bnx2x_fw_dump(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200615 bnx2x_mc_assert(bp);
616 BNX2X_ERR("end crash dump -----------------\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200617}
618
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800619static void bnx2x_int_enable(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200620{
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700621 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200622 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
623 u32 val = REG_RD(bp, addr);
624 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
Eilon Greenstein8badd272009-02-12 08:36:15 +0000625 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200626
627 if (msix) {
Eilon Greenstein8badd272009-02-12 08:36:15 +0000628 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
629 HC_CONFIG_0_REG_INT_LINE_EN_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200630 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
631 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
Eilon Greenstein8badd272009-02-12 08:36:15 +0000632 } else if (msi) {
633 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
634 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
635 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
636 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200637 } else {
638 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800639 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200640 HC_CONFIG_0_REG_INT_LINE_EN_0 |
641 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800642
Eilon Greenstein8badd272009-02-12 08:36:15 +0000643 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
644 val, port, addr);
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800645
646 REG_WR(bp, addr, val);
647
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200648 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
649 }
650
Eilon Greenstein8badd272009-02-12 08:36:15 +0000651 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
652 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200653
654 REG_WR(bp, addr, val);
Eilon Greenstein37dbbf32009-07-21 05:47:33 +0000655 /*
656 * Ensure that HC_CONFIG is written before leading/trailing edge config
657 */
658 mmiowb();
659 barrier();
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700660
661 if (CHIP_IS_E1H(bp)) {
662 /* init leading/trailing edge */
663 if (IS_E1HMF(bp)) {
Eilon Greenstein8badd272009-02-12 08:36:15 +0000664 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700665 if (bp->port.pmf)
Eilon Greenstein4acac6a2009-02-12 08:36:52 +0000666 /* enable nig and gpio3 attention */
667 val |= 0x1100;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700668 } else
669 val = 0xffff;
670
671 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
672 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
673 }
Eilon Greenstein37dbbf32009-07-21 05:47:33 +0000674
675 /* Make sure that interrupts are indeed enabled from here on */
676 mmiowb();
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200677}
678
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800679static void bnx2x_int_disable(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200680{
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700681 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200682 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
683 u32 val = REG_RD(bp, addr);
684
685 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
686 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
687 HC_CONFIG_0_REG_INT_LINE_EN_0 |
688 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
689
690 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
691 val, port, addr);
692
Eilon Greenstein8badd272009-02-12 08:36:15 +0000693 /* flush all outstanding writes */
694 mmiowb();
695
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200696 REG_WR(bp, addr, val);
697 if (REG_RD(bp, addr) != val)
698 BNX2X_ERR("BUG! proper val not read from IGU!\n");
Eilon Greenstein356e2382009-02-12 08:38:32 +0000699
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200700}
701
/* Gate off interrupt handling and wait for all in-flight ISRs and the
 * slowpath task to finish.
 *
 * @disable_hw: if set, also mask interrupts at the HC so the HW stops
 *              generating them; otherwise only the SW gate (intr_sem)
 *              is raised.
 */
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is the default/slowpath vector */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
726
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700727/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200728
729/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700730 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200731 */
732
/* Acknowledge a status block update to the IGU for status block @sb_id:
 * report the consumed @index for the given @storm, select the interrupt
 * mode @op and optionally request an index @update, all in a single
 * write to the per-port IGU command register.
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
755
756static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
757{
758 struct host_status_block *fpsb = fp->status_blk;
759 u16 rc = 0;
760
761 barrier(); /* status block is written to by the chip */
762 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
763 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
764 rc |= 1;
765 }
766 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
767 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
768 rc |= 2;
769 }
770 return rc;
771}
772
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200773static u16 bnx2x_ack_int(struct bnx2x *bp)
774{
Eilon Greenstein5c862842008-08-13 15:51:48 -0700775 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
776 COMMAND_REG_SIMD_MASK);
777 u32 result = REG_RD(bp, hc_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200778
Eilon Greenstein5c862842008-08-13 15:51:48 -0700779 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
780 result, hc_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200781
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200782 return result;
783}
784
785
786/*
787 * fast path service functions
788 */
789
Eilon Greenstein237907c2009-01-14 06:42:44 +0000790static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
791{
792 u16 tx_cons_sb;
793
794 /* Tell compiler that status block fields can change */
795 barrier();
796 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -0800797 return (fp->tx_pkt_cons != tx_cons_sb);
798}
799
800static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
801{
802 /* Tell compiler that consumer and producer can change */
803 barrier();
804 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
Eilon Greenstein237907c2009-01-14 06:42:44 +0000805}
806
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 *
 * Walks the BD chain of one completed TX packet: unmaps the first BD,
 * skips the parse BD and (for TSO) the split-header BD which carry no
 * DMA mapping, unmaps every frag BD and finally releases the skb.
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	/* nbd = remaining BDs of the packet after the first one */
	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
874
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700875static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200876{
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700877 s16 used;
878 u16 prod;
879 u16 cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200880
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700881 barrier(); /* Tell compiler that prod and cons can change */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200882 prod = fp->tx_bd_prod;
883 cons = fp->tx_bd_cons;
884
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700885 /* NUM_TX_RINGS = number of "next-page" entries
886 It will be used as a threshold */
887 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200888
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700889#ifdef BNX2X_STOP_ON_ERROR
Ilpo Järvinen53e5e962008-07-25 21:40:45 -0700890 WARN_ON(used < 0);
891 WARN_ON(used > fp->bp->tx_ring_size);
892 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700893#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200894
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700895 return (s16)(fp->bp->tx_ring_size) - used;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200896}
897
/* Reclaim completed TX packets: walk from the driver's sw consumer up to
 * the hw consumer reported in the status block, freeing each packet's
 * BD chain, then wake the netdev TX queue if it was stopped and enough
 * ring space has been freed.
 */
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		/* re-check under the tx lock before actually waking */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}
958
Eilon Greenstein3196a882008-08-13 15:58:49 -0700959
/* Handle a slowpath (ramrod completion) CQE seen on a fastpath ring.
 * For a non-default queue (fp->index != 0) the per-queue open/halt state
 * machine is advanced; for the default queue the global device state is
 * advanced instead.  The mb() makes the state change visible to
 * bnx2x_wait_ramrod() which polls it from another context.
 */
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* the completed ramrod releases its slowpath queue slot */
	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	/* default queue: drive the global device state machine */
	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
1033
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001034static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1035 struct bnx2x_fastpath *fp, u16 index)
1036{
1037 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1038 struct page *page = sw_buf->page;
1039 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1040
1041 /* Skip "next page" elements */
1042 if (!page)
1043 return;
1044
1045 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001046 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001047 __free_pages(page, PAGES_PER_SGE_SHIFT);
1048
1049 sw_buf->page = NULL;
1050 sge->addr_hi = 0;
1051 sge->addr_lo = 0;
1052}
1053
/* Free the first @last SGE entries of the fastpath page ring */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int idx = 0;

	while (idx < last)
		bnx2x_free_rx_sge(bp, fp, idx++);
}
1062
1063static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1064 struct bnx2x_fastpath *fp, u16 index)
1065{
1066 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1067 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1068 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1069 dma_addr_t mapping;
1070
1071 if (unlikely(page == NULL))
1072 return -ENOMEM;
1073
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001074 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001075 PCI_DMA_FROMDEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001076 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001077 __free_pages(page, PAGES_PER_SGE_SHIFT);
1078 return -ENOMEM;
1079 }
1080
1081 sw_buf->page = page;
1082 pci_unmap_addr_set(sw_buf, mapping, mapping);
1083
1084 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1085 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1086
1087 return 0;
1088}
1089
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001090static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1091 struct bnx2x_fastpath *fp, u16 index)
1092{
1093 struct sk_buff *skb;
1094 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1095 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1096 dma_addr_t mapping;
1097
1098 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1099 if (unlikely(skb == NULL))
1100 return -ENOMEM;
1101
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07001102 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001103 PCI_DMA_FROMDEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001104 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001105 dev_kfree_skb(skb);
1106 return -ENOMEM;
1107 }
1108
1109 rx_buf->skb = skb;
1110 pci_unmap_addr_set(rx_buf, mapping, mapping);
1111
1112 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1113 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1114
1115 return 0;
1116}
1117
1118/* note that we are not allocating a new skb,
1119 * we are just moving one from cons to prod
1120 * we are not creating a new mapping,
1121 * so there is no need to check for dma_mapping_error().
1122 */
1123static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1124 struct sk_buff *skb, u16 cons, u16 prod)
1125{
1126 struct bnx2x *bp = fp->bp;
1127 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1128 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1129 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1130 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1131
1132 pci_dma_sync_single_for_device(bp->pdev,
1133 pci_unmap_addr(cons_rx_buf, mapping),
Eilon Greenstein87942b42009-02-12 08:36:49 +00001134 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001135
1136 prod_rx_buf->skb = cons_rx_buf->skb;
1137 pci_unmap_addr_set(prod_rx_buf, mapping,
1138 pci_unmap_addr(cons_rx_buf, mapping));
1139 *prod_bd = *cons_bd;
1140}
1141
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001142static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1143 u16 idx)
1144{
1145 u16 last_max = fp->last_max_sge;
1146
1147 if (SUB_S16(idx, last_max) > 0)
1148 fp->last_max_sge = idx;
1149}
1150
1151static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1152{
1153 int i, j;
1154
1155 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1156 int idx = RX_SGE_CNT * i - 1;
1157
1158 for (j = 0; j < 2; j++) {
1159 SGE_MASK_CLEAR_BIT(fp, idx);
1160 idx--;
1161 }
1162 }
1163}
1164
/* Process the SGL of a TPA completion CQE: mark every SGE the FW
 * consumed as used in the sge_mask, then advance fp->rx_sge_prod over
 * any fully-consumed 64-bit mask elements so those pages can be
 * re-posted to the chip.
 */
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	/* number of SGE pages used beyond what fit on the first BD */
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		/* stop at the first mask element that still has bits set */
		if (likely(fp->sge_mask[i]))
			break;

		/* element fully consumed - re-arm it and advance the prod */
		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
1217
/* Initialize the SGE availability mask: mark every SGE as free, then
 * knock out the "next page" slots which never hold data.
 */
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
1230
/* Begin TPA aggregation on bin @queue: the empty skb held in the pool is
 * mapped and posted at @prod, while the skb that received the first
 * segment (at @cons) is parked in the pool - still mapped - until
 * bnx2x_tpa_stop() completes the aggregation.
 */
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
1269
/* Attach the SGE pages listed in @fp_cqe's SGL to @skb as page frags,
 * replenishing each consumed SGE slot with a freshly allocated page.
 * Returns 0 on success or a negative errno if a replacement page cannot
 * be allocated (the caller then drops the whole packet).
 */
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	/* bytes carried by the SGL = total packet len minus first-BD part */
	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
1335
/* Complete TPA aggregation on bin @queue: unmap the aggregated skb held
 * in the pool, attach its SGE frags, recompute the IP checksum and hand
 * the packet to the stack.  A fresh skb is allocated to re-arm the bin;
 * if that fails, the aggregated packet is dropped and the old buffer
 * stays in the bin.
 */
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		/* HW validated the checksum of the aggregated packet */
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			/* aggregation changed tot_len - redo the IP csum */
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
1425
/* Publish new rx BD, CQE and SGE producer values to the chip by writing
 * the producer struct into USTORM internal memory, one u32 at a time.
 * The wmb() guarantees BD/SGE contents are visible before the producers.
 */
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
1460
/*
 * RX fast-path poll routine: drain up to @budget completions from the
 * fastpath's completion queue (CQ), refill/reuse RX buffer descriptors
 * and hand received skbs to the stack (with VLAN acceleration and TPA
 * aggregation handling where applicable).
 *
 * Returns the number of packets processed (0..budget).  Called from the
 * NAPI poll context for @fp.
 */
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	/* Snapshot the software producer/consumer indices; bd_prod_fw
	 * tracks the raw (non-masked) producer written back to the HW */
	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		/* Mask the raw indices into ring positions */
		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					/* Buffer moves into the aggregation
					 * context; no skb is delivered yet */
					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			/* Make the beginning of the packet visible to the
			 * CPU before inspecting/copying it */
			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				/* original buffer stays mapped and is
				 * recycled back onto the BD ring */
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				/* replacement buffer posted - unmap and
				 * deliver the original one */
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Trust the HW checksum only if RX csum offload is
			 * enabled and the CQE reports it as valid */
			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	/* Publish the new ring positions and tell the chip about them */
	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
1683
/*
 * MSI-X fast-path interrupt handler: one vector per fastpath queue.
 * Disables further interrupts from the queue's status block and hands
 * the actual RX/TX work to the NAPI poll routine.
 */
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	/* Mask this status block's interrupt until NAPI poll completes */
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Warm up the cache lines the poll routine will touch first */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
1714
/*
 * Legacy INTx/MSI interrupt handler (single vector shared by all status
 * blocks).  The ack'ed status word is decoded bit by bit: bit 0 is the
 * slowpath indication, the bit at (0x2 << sb_id) belongs to fastpath
 * queue 0.
 */
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Fast-path work for queue 0: defer to NAPI */
	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		/* Warm up the cache lines the poll routine will touch */
		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	/* Slowpath indication: handled from the slowpath workqueue task */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
1768
1769/* end of fast path */
1770
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001771static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001772
1773/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001774
1775/*
1776 * General service functions
1777 */
1778
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001779static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001780{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001781 u32 lock_status;
1782 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001783 int func = BP_FUNC(bp);
1784 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001785 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001786
1787 /* Validating that the resource is within range */
1788 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1789 DP(NETIF_MSG_HW,
1790 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1791 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1792 return -EINVAL;
1793 }
1794
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001795 if (func <= 5) {
1796 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1797 } else {
1798 hw_lock_control_reg =
1799 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1800 }
1801
Eliezer Tamirf1410642008-02-28 11:51:50 -08001802 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001803 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001804 if (lock_status & resource_bit) {
1805 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1806 lock_status, resource_bit);
1807 return -EEXIST;
1808 }
1809
Eilon Greenstein46230472008-08-25 15:23:30 -07001810 /* Try for 5 second every 5ms */
1811 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001812 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001813 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1814 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001815 if (lock_status & resource_bit)
1816 return 0;
1817
1818 msleep(5);
1819 }
1820 DP(NETIF_MSG_HW, "Timeout\n");
1821 return -EAGAIN;
1822}
1823
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001824static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001825{
1826 u32 lock_status;
1827 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001828 int func = BP_FUNC(bp);
1829 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001830
1831 /* Validating that the resource is within range */
1832 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1833 DP(NETIF_MSG_HW,
1834 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1835 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1836 return -EINVAL;
1837 }
1838
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001839 if (func <= 5) {
1840 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1841 } else {
1842 hw_lock_control_reg =
1843 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1844 }
1845
Eliezer Tamirf1410642008-02-28 11:51:50 -08001846 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001847 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001848 if (!(lock_status & resource_bit)) {
1849 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1850 lock_status, resource_bit);
1851 return -EFAULT;
1852 }
1853
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001854 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001855 return 0;
1856}
1857
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	/* Lock order: the per-port software mutex first, then the MDIO
	 * HW lock (only needed on boards that set need_hw_lock).
	 */
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}
1866
static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	/* Release in reverse order of bnx2x_acquire_phy_lock():
	 * the MDIO HW lock first, then the software mutex.
	 */
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
1874
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001875int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1876{
1877 /* The GPIO should be swapped if swap register is set and active */
1878 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1879 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1880 int gpio_shift = gpio_num +
1881 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1882 u32 gpio_mask = (1 << gpio_shift);
1883 u32 gpio_reg;
1884 int value;
1885
1886 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1887 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1888 return -EINVAL;
1889 }
1890
1891 /* read GPIO value */
1892 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1893
1894 /* get the requested pin value */
1895 if ((gpio_reg & gpio_mask) == gpio_mask)
1896 value = 1;
1897 else
1898 value = 0;
1899
1900 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1901
1902 return value;
1903}
1904
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001905int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001906{
1907 /* The GPIO should be swapped if swap register is set and active */
1908 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001909 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001910 int gpio_shift = gpio_num +
1911 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1912 u32 gpio_mask = (1 << gpio_shift);
1913 u32 gpio_reg;
1914
1915 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1916 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1917 return -EINVAL;
1918 }
1919
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001920 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001921 /* read GPIO and mask except the float bits */
1922 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1923
1924 switch (mode) {
1925 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1926 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1927 gpio_num, gpio_shift);
1928 /* clear FLOAT and set CLR */
1929 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1930 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1931 break;
1932
1933 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1934 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1935 gpio_num, gpio_shift);
1936 /* clear FLOAT and set SET */
1937 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1938 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1939 break;
1940
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001941 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
Eliezer Tamirf1410642008-02-28 11:51:50 -08001942 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1943 gpio_num, gpio_shift);
1944 /* set FLOAT */
1945 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1946 break;
1947
1948 default:
1949 break;
1950 }
1951
1952 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001953 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001954
1955 return 0;
1956}
1957
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001958int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1959{
1960 /* The GPIO should be swapped if swap register is set and active */
1961 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1962 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1963 int gpio_shift = gpio_num +
1964 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1965 u32 gpio_mask = (1 << gpio_shift);
1966 u32 gpio_reg;
1967
1968 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1969 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1970 return -EINVAL;
1971 }
1972
1973 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1974 /* read GPIO int */
1975 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1976
1977 switch (mode) {
1978 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1979 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1980 "output low\n", gpio_num, gpio_shift);
1981 /* clear SET and set CLR */
1982 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1983 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1984 break;
1985
1986 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1987 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1988 "output high\n", gpio_num, gpio_shift);
1989 /* clear CLR and set SET */
1990 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1991 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1992 break;
1993
1994 default:
1995 break;
1996 }
1997
1998 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1999 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2000
2001 return 0;
2002}
2003
Eliezer Tamirf1410642008-02-28 11:51:50 -08002004static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2005{
2006 u32 spio_mask = (1 << spio_num);
2007 u32 spio_reg;
2008
2009 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2010 (spio_num > MISC_REGISTERS_SPIO_7)) {
2011 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2012 return -EINVAL;
2013 }
2014
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002015 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002016 /* read SPIO and mask except the float bits */
2017 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2018
2019 switch (mode) {
Eilon Greenstein6378c022008-08-13 15:59:25 -07002020 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002021 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2022 /* clear FLOAT and set CLR */
2023 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2024 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2025 break;
2026
Eilon Greenstein6378c022008-08-13 15:59:25 -07002027 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002028 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2029 /* clear FLOAT and set SET */
2030 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2031 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2032 break;
2033
2034 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2035 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2036 /* set FLOAT */
2037 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2038 break;
2039
2040 default:
2041 break;
2042 }
2043
2044 REG_WR(bp, MISC_REG_SPIO, spio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002045 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002046
2047 return 0;
2048}
2049
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002050static void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002051{
Eilon Greensteinad33ea32009-01-14 21:24:57 -08002052 switch (bp->link_vars.ieee_fc &
2053 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002054 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002055 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002056 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002057 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002058
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002059 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002060 bp->port.advertising |= (ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002061 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002062 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002063
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002064 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002065 bp->port.advertising |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08002066 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002067
Eliezer Tamirf1410642008-02-28 11:51:50 -08002068 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002069 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002070 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002071 break;
2072 }
2073}
2074
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002075static void bnx2x_link_report(struct bnx2x *bp)
2076{
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002077 if (bp->link_vars.link_up) {
2078 if (bp->state == BNX2X_STATE_OPEN)
2079 netif_carrier_on(bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002080 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2081
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002082 printk("%d Mbps ", bp->link_vars.line_speed);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002083
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002084 if (bp->link_vars.duplex == DUPLEX_FULL)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002085 printk("full duplex");
2086 else
2087 printk("half duplex");
2088
David S. Millerc0700f92008-12-16 23:53:20 -08002089 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2090 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002091 printk(", receive ");
Eilon Greenstein356e2382009-02-12 08:38:32 +00002092 if (bp->link_vars.flow_ctrl &
2093 BNX2X_FLOW_CTRL_TX)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002094 printk("& transmit ");
2095 } else {
2096 printk(", transmit ");
2097 }
2098 printk("flow control ON");
2099 }
2100 printk("\n");
2101
2102 } else { /* link_down */
2103 netif_carrier_off(bp->dev);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002104 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002105 }
2106}
2107
/*
 * First-time PHY/link bring-up through the common link code.
 * Requires a running bootcode (MCP).  Returns the bnx2x_phy_init()
 * result, or -EINVAL when the MCP is absent.
 */
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		/* Diagnostic load runs the PHY in loopback */
		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		/* If the link is already up at this point, kick the
		 * statistics state machine and report it immediately */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
2144
/* (Re)start the link with the current link parameters; needs the MCP */
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		/* refresh the advertised pause bits after (re)init */
		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
2156
/* Reset the link under the PHY lock; needs a running bootcode (MCP) */
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
2166
/* Run the common link-code self test under the PHY lock.
 * Returns the bnx2x_test_link() status code.
 */
static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002177
/*
 * Initialize the per-port rate-shaping and fairness variables of the
 * congestion-management (cmng) block from the current line speed.
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	/* r_param = line_speed / 8 -- bytes per usec, assuming
	 * line_speed is in Mbps (TODO confirm unit against link code) */
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
2212
/* Program the per-VN (virtual NIC, multi-function mode) rate-shaping and
 * fairness parameters for @func: read the min/max bandwidth configuration
 * from shared memory and store the resulting contexts into XSTORM internal
 * memory for the firmware to use.
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	/* per-function bandwidth configuration comes from shared memory */
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		/* rates are encoded in units of 100 Mbps in the config word */
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	/* vn_weight_sum == 0 means fairness is disabled (all min rates 0) */
	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory (word by word) */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
2277
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002278
/* This function is called upon link interrupt (NIG attention): re-read the
 * link state, update pause/statistics accordingly and, in multi-function
 * mode, notify the other functions on the port and reprogram the
 * rate-shaping/fairness contexts.
 */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			/* tell the USTORM firmware whether TX pause is on */
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			/* skip ourselves - we already handled the event */
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
2353
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002354static void bnx2x__link_status_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002355{
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002356 if (bp->state != BNX2X_STATE_OPEN)
2357 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002358
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002359 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2360
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002361 if (bp->link_vars.link_up)
2362 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2363 else
2364 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2365
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002366 /* indicate link status */
2367 bnx2x_link_report(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002368}
2369
/* Take over the PMF (port management function) role for this function:
 * mark it in the driver state, enable NIG attention for our VN in the
 * HC edge registers and kick the statistics state machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2385
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002386/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002387
2388/* slow path */
2389
2390/*
2391 * General service functions
2392 */
2393
/* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post a slow-path queue element (ramrod) to the chip.
 * Fills the next SPQ BD with @command/@cid and the data pointer, then
 * writes the new producer index to XSTORM internal memory.
 * Returns 0 on success, -EIO when panicked, -EBUSY if the ring is full.
 */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	/* producer state is shared - serialize against other posters */
	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded int it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	/* advance the producer, wrapping at the last BD */
	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	mmiowb();

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2455
2456/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002457static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002458{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002459 u32 i, j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002460 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002461
2462 might_sleep();
2463 i = 100;
2464 for (j = 0; j < i*10; j++) {
2465 val = (1UL << 31);
2466 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2467 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2468 if (val & (1L << 31))
2469 break;
2470
2471 msleep(5);
2472 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002473 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002474 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002475 rc = -EBUSY;
2476 }
2477
2478 return rc;
2479}
2480
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002481/* release split MCP access lock register */
2482static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002483{
2484 u32 val = 0;
2485
2486 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2487}
2488
2489static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2490{
2491 struct host_def_status_block *def_sb = bp->def_status_blk;
2492 u16 rc = 0;
2493
2494 barrier(); /* status block is written to by the chip */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002495 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2496 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2497 rc |= 1;
2498 }
2499 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2500 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2501 rc |= 2;
2502 }
2503 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2504 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2505 rc |= 4;
2506 }
2507 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2508 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2509 rc |= 8;
2510 }
2511 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2512 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2513 rc |= 16;
2514 }
2515 return rc;
2516}
2517
2518/*
2519 * slow path service functions
2520 */
2521
/* Handle newly-asserted attention bits: mask them in the AEU so they do
 * not re-fire, record them in attn_state, service the hard-wired sources
 * (NIG/link, GPIOs, general attentions) and finally acknowledge them to
 * the HC. The NIG interrupt is masked around bnx2x_link_attn() and
 * restored afterwards, under the PHY lock.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	/* a bit cannot assert while already recorded as asserted */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* the AEU mask is shared with the MCP - take the HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1;
		   clear each one that fired */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	/* acknowledge the asserted bits to the HC */
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2617
/* Record a fan failure: mark the external PHY type as FAILURE in the
 * shared-memory port configuration (persists the fault for other agents)
 * and log an error telling the user the card was shut down.
 */
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage.  Please contact Dell Support for assistance\n",
	       bp->dev->name);
}
/* Service deasserted attention group 0: SPIO5 (fan failure - powers the
 * PHY down and records the fault), GPIO3 module-detect events, and fatal
 * HW block attentions (which panic the driver).
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* disable the SPIO5 attention so it cannot re-fire */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the fatal bits before panicking */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
2686
/* Service deasserted attention group 1: doorbell queue (DORQ) errors and
 * fatal HW block attentions in set 1 (panic).
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* reading the status register also clears it */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the fatal bits before panicking */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
2717
/* Service deasserted attention group 2: CFC and PXP block errors and
 * fatal HW block attentions in set 2 (panic).
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		/* reading the status register also clears it */
		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the fatal bits before panicking */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
2757
/* Service deasserted attention group 3: general attentions (PMF handover
 * via link-sync attention, MC/MCP firmware asserts - both fatal) and
 * latched attentions (GRC timeout/reserved), which are logged and cleared.
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* clear our link-sync general attention */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			/* the MCP may have promoted us to PMF */
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* the GRC attention registers exist on E1H only */
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* clear the latched attentions */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
2807
/* Handle newly-deasserted attention bits: read the after-invert signal
 * registers, dispatch each deasserted dynamic group to the per-group
 * handlers, acknowledge the bits to the HC and re-enable them in the AEU
 * mask. The ALR (split MCP access lock) serializes this against the MCP
 * and the other port.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			/* only the signals routed to this group matter */
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	/* acknowledge the deasserted bits to the HC */
	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	/* a bit cannot deassert unless it was recorded as asserted */
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* the AEU mask is shared with the MCP - take the HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
2886
2887static void bnx2x_attn_int(struct bnx2x *bp)
2888{
2889 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08002890 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2891 attn_bits);
2892 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2893 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002894 u32 attn_state = bp->attn_state;
2895
2896 /* look for changed bits */
2897 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2898 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2899
2900 DP(NETIF_MSG_HW,
2901 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2902 attn_bits, attn_ack, asserted, deasserted);
2903
2904 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002905 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002906
2907 /* handle bits that were raised */
2908 if (asserted)
2909 bnx2x_attn_int_asserted(bp, asserted);
2910
2911 if (deasserted)
2912 bnx2x_attn_int_deasserted(bp, deasserted);
2913}
2914
/* Slow-path work handler (scheduled from the SP MSI-X/INTA handler):
 * refresh the default status block indices, process HW attentions if the
 * attention index changed, then acknowledge each storm index back to the
 * IGU, re-enabling the interrupt on the last ack.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;


	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions (bit 0 = attention index changed) */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* ack all storms; only the final TSTORM ack re-enables the IGU int */
	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}
2949
/* MSI-X slowpath interrupt handler.  Masks further slowpath interrupts at
 * the IGU (IGU_INT_DISABLE ack) and defers the real work to
 * bnx2x_sp_task() on the bnx2x_wq workqueue.  Always reports the IRQ as
 * handled, even when bailing out early.
 */
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* disable the IGU interrupt until the slowpath task has run and
	   re-enabled it with its final ack */
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
2972
/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
*
* 64-bit statistics are kept as {hi, lo} u32 pairs.  The UPDATE_* macros
* below are NOT hygienic: they expand references to local variables of the
* calling function (new, old, pstats, estats, qstats, diff, *client,
* old_*client) and may only be used inside the *_stats_update() helpers
* that declare those locals.
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

/* mac_stx[0] keeps the last raw HW snapshot of stat 's'; mac_stx[1]
   accumulates the deltas into stat 't' */
#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

/* NIG variant: delta of 'new' vs 'old' snapshot added into estats->t */
#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

/* fold a raw 32-bit EMAC counter into the 64-bit accumulator in
   mac_stx[1] (counter and accumulator share the name 's') */
#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

/* per-storm-client variants: compute the wrap-safe delta of little-endian
   counter 's', remember the new raw value, and extend into qstats->t */
#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

/* like UPDATE_EXTEND_USTAT but subtracts the delta (and does not update
   the old_uclient snapshot) */
#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */
3090
3091static inline long bnx2x_hilo(u32 *hiref)
3092{
3093 u32 lo = *(hiref + 1);
3094#if (BITS_PER_LONG == 64)
3095 u32 hi = *hiref;
3096
3097 return HILO_U64(hi, lo);
3098#else
3099 return lo;
3100#endif
3101}
3102
3103/*
3104 * Init service functions
3105 */
3106
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003107static void bnx2x_storm_stats_post(struct bnx2x *bp)
3108{
3109 if (!bp->stats_pending) {
3110 struct eth_query_ramrod_data ramrod_data = {0};
Eilon Greensteinde832a52009-02-12 08:36:33 +00003111 int i, rc;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003112
3113 ramrod_data.drv_counter = bp->stats_counter++;
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08003114 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
Eilon Greensteinde832a52009-02-12 08:36:33 +00003115 for_each_queue(bp, i)
3116 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003117
3118 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3119 ((u32 *)&ramrod_data)[1],
3120 ((u32 *)&ramrod_data)[0], 0);
3121 if (rc == 0) {
3122 /* stats ramrod has it's own slot on the spq */
3123 bp->spq_left++;
3124 bp->stats_pending = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003125 }
3126 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003127}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003128
/* One-time statistics initialization: resets the driver-side bookkeeping,
 * latches the baseline NIG counters, clears all per-queue and aggregate
 * software statistics, and (for an E1H-MF PMF with a port-stats mailbox)
 * kicks the state machine with a PMF event.
 */
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	/* snapshot the current NIG counters as the "old" baseline so the
	   first update computes deltas from here */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
3175
/* Launch the previously-programmed chain of statistics DMAE commands.
 * If any commands were queued (executer_idx != 0) a "loader" DMAE is
 * built that copies the queued commands into the DMAE command memory and
 * chains them; otherwise, if only function stats exist, the single
 * stats_dmae command is posted directly.  Completion is signalled by the
 * last command writing DMAE_COMP_VAL into the stats_comp word, which is
 * preset here so emulation/FPGA (CHIP_REV_IS_SLOW) can skip the DMA and
 * still look "complete" to bnx2x_stats_comp().
 */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		/* source: the queued command array in the slowpath buffer;
		   destination: DMAE command memory slot loader_idx + 1 */
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		/* completion kicks off the freshly-loaded command */
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
3223
3224static int bnx2x_stats_comp(struct bnx2x *bp)
3225{
3226 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3227 int cnt = 10;
3228
3229 might_sleep();
3230 while (*stats_comp != DMAE_COMP_VAL) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003231 if (!cnt) {
3232 BNX2X_ERR("timeout waiting for stats finished\n");
3233 break;
3234 }
3235 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -07003236 msleep(1);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003237 }
3238 return 1;
3239}
3240
3241/*
3242 * Statistics service functions
3243 */
3244
/* On becoming the PMF in an E1H multi-function setup, read back the
 * accumulated port statistics from the MCP shared memory (port_stx) into
 * the local port_stats buffer, so the new PMF continues from the old
 * PMF's totals.  The copy is split into two DMAE commands because a
 * single GRC read is limited to DMAE_LEN32_RD_MAX dwords; the pair is
 * posted and waited for synchronously.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* common opcode: GRC -> PCI read, per-port / per-vn tagged */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	/* first chunk: DMAE_LEN32_RD_MAX dwords, completion chains to the
	   second command via the loader GO register */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second chunk: the remainder, completion writes DMAE_COMP_VAL to
	   stats_comp for bnx2x_stats_comp() to poll */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3299
/* PMF path: program the full chain of statistics DMAE commands that a
 * single bnx2x_hw_stats_post() will execute:
 *   - write host port stats back to MCP shared memory (port_stx),
 *   - write host function stats to func_stx,
 *   - read the active MAC's HW counters (BMAC or EMAC) into mac_stats,
 *   - read the NIG counters into nig_stats.
 * All commands but the last complete into the loader GO register to chain
 * to the next; the final one writes DMAE_COMP_VAL into stats_comp.
 * Only programs the chain — nothing is posted here.
 */
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	/* host port stats -> MCP shared memory */
	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* host function stats -> MCP shared memory */
	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* final command: completes into stats_comp instead of chaining */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
3507
/* Non-PMF path: program the single DMAE command that writes the host
 * function statistics to the MCP shared memory (func_stx).  Completion is
 * signalled directly into stats_comp.  Only programs the command —
 * bnx2x_hw_stats_post() actually posts it.
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
3543
/* Start a statistics cycle: (re)program the DMAE command chain — the full
 * port chain when this function is the PMF, otherwise just the function
 * stats command — then post the HW DMAE transfer and the storm stats
 * ramrod.
 */
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
3555
/* PMF takeover: wait for any in-flight stats DMAE to finish, pull the old
 * PMF's accumulated port stats from shared memory, then start a normal
 * statistics cycle.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
3562
/* Restart the statistics cycle: wait for the previous DMAE chain to
 * complete, then kick off a new one.
 */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003568
/* Fold the freshly-DMAE'd BMAC hardware counters into the accumulated
 * port statistics.  UPDATE_STAT64 keeps the raw snapshot in mac_stx[0]
 * and the running 64-bit total in mac_stx[1] (see the macro block above);
 * pause-frame totals are then mirrored into the ethtool stats.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch pair used by the UPDATE_STAT64 macro expansions */
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	/* grxpf feeds two software counters */
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	/* expose pause counters through the aggregate ethtool stats */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
3619
/* Fold the freshly-DMAE'd EMAC hardware counters into the accumulated
 * port statistics.  EMAC counters are plain 32-bit values, so
 * UPDATE_EXTEND_STAT extends each into the 64-bit accumulator in
 * mac_stx[1]; pause totals (xon + xoff) are then summed into the ethtool
 * stats.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause frames received = xon + xoff received */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause frames sent = xon + xoff sent */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
3676
/* Consume the results of a completed statistics DMAE chain: dispatch to
 * the active MAC's updater, fold the NIG BRB discard/truncate deltas and
 * egress packet counters, refresh the "old" NIG snapshot, copy the MAC
 * accumulators into the aggregate ethtool stats, bump the port-stats
 * sequence numbers, and report a changed NIG timer max from shared
 * memory.  Returns 0 on success, -1 if no MAC is active (should not
 * happen while the link is up).
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch pair used by the UPDATE_STAT64_NIG macro expansions */
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	/* current NIG readings become the baseline for the next cycle */
	memcpy(old, new, sizeof(struct nig_stats));

	/* mirror the accumulated MAC stats into the ethtool aggregate;
	   relies on estats' MAC fields mirroring struct mac_stx layout
	   starting at rx_stat_ifhcinbadoctets_hi */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	/* matching start/end marks the snapshot as consistent */
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
3726
/* Fold the firmware (x/t/u-storm) per-client statistics into the
 * per-queue, per-function and driver-global statistics blocks.
 *
 * Returns 0 on success, or a negative value if any storm has not yet
 * published a snapshot matching bp->stats_counter (the caller retries
 * on the next statistics cycle).
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	/* re-accumulate the function totals from scratch; the memset
	 * skips the first two u32s of host_func_stats (NOTE(review):
	 * presumably the start/end consistency markers - confirm
	 * against the struct layout) */
	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		/* NOTE(review): scratch presumably used inside the
		 * UPDATE_/SUB_EXTEND_* macro expansions - confirm */
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
		qstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
		qstats->total_bytes_received_lo =
		qstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		/* total received = valid + error bytes */
		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		/* packets dropped by the ustorm for lack of buffers were
		 * counted as received above - subtract them back out and
		 * account them as no-buffer discards instead */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		/* fold the per-queue counters into the function totals */
		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	/* MAC-reported bad octets are part of the received total too */
	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* mirror the accumulated function counters into the driver stats */
	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* per-port discard counters are only read on the PMF */
	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	/* mark the function stats block consistent (start == end) */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
3917
/* Derive the netdev-visible statistics (bp->dev->stats) from the
 * driver's accumulated 64-bit counters in bp->eth_stats. */
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	/* MAC drops plus per-queue checksum discards */
	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	/* rx_errors is the sum of the specific rx error classes above */
	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
3983
3984static void bnx2x_drv_stats_update(struct bnx2x *bp)
3985{
3986 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3987 int i;
3988
3989 estats->driver_xoff = 0;
3990 estats->rx_err_discard_pkt = 0;
3991 estats->rx_skb_alloc_failed = 0;
3992 estats->hw_csum_err = 0;
3993 for_each_queue(bp, i) {
3994 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3995
3996 estats->driver_xoff += qstats->driver_xoff;
3997 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3998 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3999 estats->hw_csum_err += qstats->hw_csum_err;
4000 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004001}
4002
/* Statistics state-machine UPDATE handler: parse the stats delivered
 * by the last DMAE/firmware cycle, refresh the netdev and driver
 * counters, optionally dump debug info, then trigger the next cycle.
 * Panics if the storm stats stay stale for several cycles in a row. */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* the previous DMAE transfer has not completed yet - try later */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	/* hardware (MAC/NIG) stats are only gathered on the PMF */
	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* non-zero return means the storms have not published a fresh
	 * snapshot; give up and panic after repeated failures */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* verbose per-timer debug dump of queue 0 and discard counters */
	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
				  " tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
				  " rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %lu no_buff_discard %lu "
			"mac_discard %u mac_filter_discard %u "
			"xxovrflow_discard %u brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	/* kick off the next hardware + storm statistics cycle */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004068
/* Build the final DMAE commands that flush the host port and function
 * statistics blocks back to their device (GRC) locations before
 * statistics gathering stops.
 *
 * When both a port and a function stats address exist, the two DMAE
 * commands are chained: the first completes into the DMAE loader
 * register, the second into the host stats_comp word.  Only the
 * last command in the chain uses DMAE_COMP_VAL so bnx2x_stats_comp()
 * can wait on it.
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* common opcode bits: host memory -> GRC, per-port/vn routing */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* chain to the next command if one follows, otherwise
		 * complete into the host stats_comp word */
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		/* last command: flush the function stats and signal
		 * completion through stats_comp */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
4132
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004133static void bnx2x_stats_stop(struct bnx2x *bp)
4134{
4135 int update = 0;
4136
4137 bnx2x_stats_comp(bp);
4138
4139 if (bp->port.pmf)
4140 update = (bnx2x_hw_stats_update(bp) == 0);
4141
4142 update |= (bnx2x_storm_stats_update(bp) == 0);
4143
4144 if (update) {
4145 bnx2x_net_stats_update(bp);
4146
4147 if (bp->port.pmf)
4148 bnx2x_port_stats_stop(bp);
4149
4150 bnx2x_hw_stats_post(bp);
4151 bnx2x_stats_comp(bp);
4152 }
4153}
4154
/* No-op handler for state/event pairs in bnx2x_stats_stm that require
 * no action. */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
4158
/* Statistics state machine: indexed by [current state][event], each
 * entry gives the handler to invoke and the state to transition to.
 * Driven by bnx2x_stats_handle(). */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
4177
4178static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4179{
4180 enum bnx2x_stats_state state = bp->stats_state;
4181
4182 bnx2x_stats_stm[state][event].action(bp);
4183 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4184
4185 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4186 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4187 state, event, bp->stats_state);
4188}
4189
/* Periodic driver timer: services queue 0 in interrupt-less "poll"
 * mode, maintains the driver<->MCP heartbeat pulse, kicks a statistics
 * update when the device is up, and re-arms itself. */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	/* device closed - do not re-arm */
	if (!netif_running(bp->dev))
		return;

	/* interrupts are masked - skip the work but keep the timer alive */
	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		/* poll mode: drive tx/rx completion of queue 0 here */
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		/* advance and publish the driver's heartbeat sequence */
		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4239
4240/* end of Statistics */
4241
4242/* nic init */
4243
4244/*
4245 * nic init service functions
4246 */
4247
/* Zero the USTORM and CSTORM host status block images in internal
 * memory for status block @sb_id on this port. */
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}
4259
/* Initialize a (non-default) host status block.
 *
 * Programs the USTORM and CSTORM with the DMA address of the host
 * status block @sb, tags the block with @sb_id, assigns it to this
 * function, disables host-coalescing for every index, and finally
 * ACKs/enables the IGU interrupt for the block.
 */
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	/* write the 64-bit host address of the ustorm section */
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	/* disable host coalescing on all ustorm indices */
	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	/* disable host coalescing on all cstorm indices */
	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4304
/* Zero this function's default status block image in the internal
 * memory of all four storms (T, U, C and X). */
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
4322
/*
 * bnx2x_init_def_sb - initialize the default (slow-path) status block.
 *
 * Programs the attention (ATTN) section and the per-storm (U/C/T/X)
 * default status block sections: writes each section's DMA address and
 * owning function into the storms' internal memory, and disables host
 * coalescing on every index of each section.  Finally ACKs the status
 * block to enable IGU interrupts for it.
 *
 * @bp:      driver instance
 * @def_sb:  host virtual address of the default status block
 * @mapping: DMA address of @def_sb (split into LO/HI words for the HW)
 * @sb_id:   status block id to assign to every section
 */
 4323static void bnx2x_init_def_sb(struct bnx2x *bp,
 4324 struct host_def_status_block *def_sb,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004325 dma_addr_t mapping, int sb_id)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004326{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004327 int port = BP_PORT(bp);
 4328 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004329 int index, val, reg_offset;
 4330 u64 section;
 4331
 4332 /* ATTN */
 4333 section = ((u64)mapping) + offsetof(struct host_def_status_block,
 4334 atten_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004335 def_sb->atten_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004336
Eliezer Tamir49d66772008-02-28 11:53:13 -08004337 bp->attn_state = 0;
 4338
/* AEU enable registers are per-port; 0x10 bytes per attention group,
 * four 32-bit signal masks per group */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004339 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
 4340 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
 4341
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004342 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004343 bp->attn_group[index].sig[0] = REG_RD(bp,
 4344 reg_offset + 0x10*index);
 4345 bp->attn_group[index].sig[1] = REG_RD(bp,
 4346 reg_offset + 0x4 + 0x10*index);
 4347 bp->attn_group[index].sig[2] = REG_RD(bp,
 4348 reg_offset + 0x8 + 0x10*index);
 4349 bp->attn_group[index].sig[3] = REG_RD(bp,
 4350 reg_offset + 0xc + 0x10*index);
 4351 }
 4352
/* Point the HC attention-message address at the ATTN section and merge
 * this sb_id into the per-port attention number register */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004353 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
 4354 HC_REG_ATTN_MSG0_ADDR_L);
 4355
 4356 REG_WR(bp, reg_offset, U64_LO(section));
 4357 REG_WR(bp, reg_offset + 4, U64_HI(section));
 4358
 4359 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
 4360
 4361 val = REG_RD(bp, reg_offset);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004362 val |= sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004363 REG_WR(bp, reg_offset, val);
 4364
/* For each storm: publish the section's DMA address (LO then HI word),
 * record the owning function, then write 1 to every HC_DISABLE index
 * to turn off host coalescing on the default status block */
 4365 /* USTORM */
 4366 section = ((u64)mapping) + offsetof(struct host_def_status_block,
 4367 u_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004368 def_sb->u_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004369
 4370 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004371 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004372 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004373 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004374 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07004375 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004376 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004377
 4378 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
 4379 REG_WR16(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004380 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004381
 4382 /* CSTORM */
 4383 section = ((u64)mapping) + offsetof(struct host_def_status_block,
 4384 c_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004385 def_sb->c_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004386
 4387 REG_WR(bp, BAR_CSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004388 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004389 REG_WR(bp, BAR_CSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004390 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004391 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07004392 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004393 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004394
 4395 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
 4396 REG_WR16(bp, BAR_CSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004397 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004398
 4399 /* TSTORM */
 4400 section = ((u64)mapping) + offsetof(struct host_def_status_block,
 4401 t_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004402 def_sb->t_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004403
 4404 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004405 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004406 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004407 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004408 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07004409 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004410 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004411
 4412 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
 4413 REG_WR16(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004414 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004415
 4416 /* XSTORM */
 4417 section = ((u64)mapping) + offsetof(struct host_def_status_block,
 4418 x_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004419 def_sb->x_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004420
 4421 REG_WR(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004422 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004423 REG_WR(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004424 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004425 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07004426 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004427 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004428
 4429 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
 4430 REG_WR16(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004431 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004432
/* Clear the pending flags consumed by the slow-path state machines */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004433 bp->stats_pending = 0;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004434 bp->set_mac_pending = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004435
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004436 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004437}
4438
/*
 * bnx2x_update_coalesce - program interrupt-coalescing timeouts.
 *
 * For every queue, writes the Rx and Tx coalescing timeouts (bp->rx_ticks
 * and bp->tx_ticks, scaled by /12 into HW units) into the USTORM/CSTORM
 * per-status-block timeout fields, and sets the HC_DISABLE flag whenever
 * the resulting timeout is zero (i.e. coalescing is effectively off).
 */
 4439static void bnx2x_update_coalesce(struct bnx2x *bp)
 4440{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004441 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004442 int i;
 4443
 4444 for_each_queue(bp, i) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004445 int sb_id = bp->fp[i].sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004446
 4447 /* HC_INDEX_U_ETH_RX_CQ_CONS */
 4448 REG_WR8(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004449 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
Eilon Greenstein5c862842008-08-13 15:51:48 -07004450 U_SB_ETH_RX_CQ_INDEX),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004451 bp->rx_ticks/12);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004452 REG_WR16(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004453 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
Eilon Greenstein5c862842008-08-13 15:51:48 -07004454 U_SB_ETH_RX_CQ_INDEX),
Eilon Greenstein3799cf42009-07-05 04:18:12 +00004455 (bp->rx_ticks/12) ? 0 : 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004456
 4457 /* HC_INDEX_C_ETH_TX_CQ_CONS */
 4458 REG_WR8(bp, BAR_CSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004459 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
Eilon Greenstein5c862842008-08-13 15:51:48 -07004460 C_SB_ETH_TX_CQ_INDEX),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004461 bp->tx_ticks/12);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004462 REG_WR16(bp, BAR_CSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004463 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
Eilon Greenstein5c862842008-08-13 15:51:48 -07004464 C_SB_ETH_TX_CQ_INDEX),
Eilon Greenstein3799cf42009-07-05 04:18:12 +00004465 (bp->tx_ticks/12) ? 0 : 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004466 }
 4467}
4468
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004469static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4470 struct bnx2x_fastpath *fp, int last)
4471{
4472 int i;
4473
4474 for (i = 0; i < last; i++) {
4475 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4476 struct sk_buff *skb = rx_buf->skb;
4477
4478 if (skb == NULL) {
4479 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4480 continue;
4481 }
4482
4483 if (fp->tpa_state[i] == BNX2X_TPA_START)
4484 pci_unmap_single(bp->pdev,
4485 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein356e2382009-02-12 08:38:32 +00004486 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004487
4488 dev_kfree_skb(skb);
4489 rx_buf->skb = NULL;
4490 }
4491}
4492
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004493static void bnx2x_init_rx_rings(struct bnx2x *bp)
4494{
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004495 int func = BP_FUNC(bp);
Eilon Greenstein32626232008-08-13 15:51:07 -07004496 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4497 ETH_MAX_AGGREGATION_QUEUES_E1H;
4498 u16 ring_prod, cqe_ring_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004499 int i, j;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004500
Eilon Greenstein87942b42009-02-12 08:36:49 +00004501 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
Eilon Greenstein0f008462009-02-12 08:36:18 +00004502 DP(NETIF_MSG_IFUP,
4503 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004504
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004505 if (bp->flags & TPA_ENABLE_FLAG) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004506
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004507 for_each_rx_queue(bp, j) {
Eilon Greenstein32626232008-08-13 15:51:07 -07004508 struct bnx2x_fastpath *fp = &bp->fp[j];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004509
Eilon Greenstein32626232008-08-13 15:51:07 -07004510 for (i = 0; i < max_agg_queues; i++) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004511 fp->tpa_pool[i].skb =
4512 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4513 if (!fp->tpa_pool[i].skb) {
4514 BNX2X_ERR("Failed to allocate TPA "
4515 "skb pool for queue[%d] - "
4516 "disabling TPA on this "
4517 "queue!\n", j);
4518 bnx2x_free_tpa_pool(bp, fp, i);
4519 fp->disable_tpa = 1;
4520 break;
4521 }
4522 pci_unmap_addr_set((struct sw_rx_bd *)
4523 &bp->fp->tpa_pool[i],
4524 mapping, 0);
4525 fp->tpa_state[i] = BNX2X_TPA_STOP;
4526 }
4527 }
4528 }
4529
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004530 for_each_rx_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004531 struct bnx2x_fastpath *fp = &bp->fp[j];
4532
4533 fp->rx_bd_cons = 0;
4534 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004535 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004536
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004537 /* "next page" elements initialization */
4538 /* SGE ring */
4539 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4540 struct eth_rx_sge *sge;
4541
4542 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4543 sge->addr_hi =
4544 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4545 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4546 sge->addr_lo =
4547 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4548 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4549 }
4550
4551 bnx2x_init_sge_ring_bit_mask(fp);
4552
4553 /* RX BD ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004554 for (i = 1; i <= NUM_RX_RINGS; i++) {
4555 struct eth_rx_bd *rx_bd;
4556
4557 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4558 rx_bd->addr_hi =
4559 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004560 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004561 rx_bd->addr_lo =
4562 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004563 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004564 }
4565
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004566 /* CQ ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004567 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4568 struct eth_rx_cqe_next_page *nextpg;
4569
4570 nextpg = (struct eth_rx_cqe_next_page *)
4571 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4572 nextpg->addr_hi =
4573 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004574 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004575 nextpg->addr_lo =
4576 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004577 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004578 }
4579
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004580 /* Allocate SGEs and initialize the ring elements */
4581 for (i = 0, ring_prod = 0;
4582 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004583
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004584 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4585 BNX2X_ERR("was only able to allocate "
4586 "%d rx sges\n", i);
4587 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4588 /* Cleanup already allocated elements */
4589 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
Eilon Greenstein32626232008-08-13 15:51:07 -07004590 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004591 fp->disable_tpa = 1;
4592 ring_prod = 0;
4593 break;
4594 }
4595 ring_prod = NEXT_SGE_IDX(ring_prod);
4596 }
4597 fp->rx_sge_prod = ring_prod;
4598
4599 /* Allocate BDs and initialize BD ring */
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004600 fp->rx_comp_cons = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004601 cqe_ring_prod = ring_prod = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004602 for (i = 0; i < bp->rx_ring_size; i++) {
4603 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4604 BNX2X_ERR("was only able to allocate "
Eilon Greensteinde832a52009-02-12 08:36:33 +00004605 "%d rx skbs on queue[%d]\n", i, j);
4606 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004607 break;
4608 }
4609 ring_prod = NEXT_RX_IDX(ring_prod);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004610 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
Ilpo Järvinen53e5e962008-07-25 21:40:45 -07004611 WARN_ON(ring_prod <= i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004612 }
4613
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004614 fp->rx_bd_prod = ring_prod;
4615 /* must not have more available CQEs than BDs */
4616 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4617 cqe_ring_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004618 fp->rx_pkt = fp->rx_calls = 0;
4619
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004620 /* Warning!
4621 * this will generate an interrupt (to the TSTORM)
4622 * must only be done after chip is initialized
4623 */
4624 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4625 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004626 if (j != 0)
4627 continue;
4628
4629 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004630 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004631 U64_LO(fp->rx_comp_mapping));
4632 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004633 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004634 U64_HI(fp->rx_comp_mapping));
4635 }
4636}
4637
/*
 * bnx2x_init_tx_ring - initialize all Tx descriptor rings.
 *
 * For every Tx queue: chains the last BD of each ring page to the next
 * page (LO/HI DMA address in little-endian), then zeroes the queue's
 * producer/consumer indices and packet counter.
 */
 4638static void bnx2x_init_tx_ring(struct bnx2x *bp)
 4639{
 4640 int i, j;
 4641
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004642 for_each_tx_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004643 struct bnx2x_fastpath *fp = &bp->fp[j];
 4644
 4645 for (i = 1; i <= NUM_TX_RINGS; i++) {
/* Last BD of each page points at the next page of the ring */
 4646 struct eth_tx_bd *tx_bd =
 4647 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
 4648
 4649 tx_bd->addr_hi =
 4650 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004651 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004652 tx_bd->addr_lo =
 4653 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004654 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004655 }
 4656
 4657 fp->tx_pkt_prod = 0;
 4658 fp->tx_pkt_cons = 0;
 4659 fp->tx_bd_prod = 0;
 4660 fp->tx_bd_cons = 0;
 4661 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
 4662 fp->tx_pkt = 0;
 4663 }
 4664}
4665
/*
 * bnx2x_init_sp_ring - initialize the slow-path (SPQ) ring.
 *
 * Resets the SPQ lock, credit counter and producer state, then tells the
 * XSTORM the SPQ page base address and initial producer index for this
 * function.
 */
 4666static void bnx2x_init_sp_ring(struct bnx2x *bp)
 4667{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004668 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004669
 4670 spin_lock_init(&bp->spq_lock);
 4671
/* spq_left is the credit of slow-path commands that may be outstanding */
 4672 bp->spq_left = MAX_SPQ_PENDING;
 4673 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004674 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
 4675 bp->spq_prod_bd = bp->spq;
 4676 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
 4677
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004678 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004679 U64_LO(bp->spq_mapping));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004680 REG_WR(bp,
 4681 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004682 U64_HI(bp->spq_mapping));
 4683
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004684 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004685 bp->spq_prod_idx);
 4686}
4687
/*
 * bnx2x_init_context - fill the per-connection Ethernet context.
 *
 * For each queue, populates the USTORM section (Rx: status-block indices,
 * client id, statistics, buffer size, BD/SGE page bases, TPA flags when
 * enabled), the XSTORM section (Tx: BD page base and doorbell data
 * address) and the CSTORM Tx-CQ index, plus the CDU reserved values for
 * the aggregation contexts.
 */
 4688static void bnx2x_init_context(struct bnx2x *bp)
 4689{
 4690 int i;
 4691
 4692 for_each_queue(bp, i) {
 4693 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
 4694 struct bnx2x_fastpath *fp = &bp->fp[i];
Eilon Greensteinde832a52009-02-12 08:36:33 +00004695 u8 cl_id = fp->cl_id;
Eilon Greenstein0626b892009-02-12 08:38:14 +00004696 u8 sb_id = fp->sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004697
/* USTORM (Rx side) context */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004698 context->ustorm_st_context.common.sb_index_numbers =
 4699 BNX2X_RX_SB_INDEX_NUM;
Eilon Greenstein0626b892009-02-12 08:38:14 +00004700 context->ustorm_st_context.common.clientId = cl_id;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004701 context->ustorm_st_context.common.status_block_id = sb_id;
 4702 context->ustorm_st_context.common.flags =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004703 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
 4704 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
 4705 context->ustorm_st_context.common.statistics_counter_id =
 4706 cl_id;
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08004707 context->ustorm_st_context.common.mc_alignment_log_size =
Eilon Greenstein0f008462009-02-12 08:36:18 +00004708 BNX2X_RX_ALIGN_SHIFT;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004709 context->ustorm_st_context.common.bd_buff_size =
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07004710 bp->rx_buf_size;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004711 context->ustorm_st_context.common.bd_page_base_hi =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004712 U64_HI(fp->rx_desc_mapping);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004713 context->ustorm_st_context.common.bd_page_base_lo =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004714 U64_LO(fp->rx_desc_mapping);
/* TPA/SGE settings only apply when TPA is enabled on this queue */
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004715 if (!fp->disable_tpa) {
 4716 context->ustorm_st_context.common.flags |=
 4717 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
 4718 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
 4719 context->ustorm_st_context.common.sge_buff_size =
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08004720 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
 4721 (u32)0xffff);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004722 context->ustorm_st_context.common.sge_page_base_hi =
 4723 U64_HI(fp->rx_sge_mapping);
 4724 context->ustorm_st_context.common.sge_page_base_lo =
 4725 U64_LO(fp->rx_sge_mapping);
 4726 }
 4727
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08004728 context->ustorm_ag_context.cdu_usage =
 4729 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
 4730 CDU_REGION_NUMBER_UCM_AG,
 4731 ETH_CONNECTION_TYPE);
 4732
/* XSTORM (Tx side) context: BD ring base and doorbell data address */
 4733 context->xstorm_st_context.tx_bd_page_base_hi =
 4734 U64_HI(fp->tx_desc_mapping);
 4735 context->xstorm_st_context.tx_bd_page_base_lo =
 4736 U64_LO(fp->tx_desc_mapping);
 4737 context->xstorm_st_context.db_data_addr_hi =
 4738 U64_HI(fp->tx_prods_mapping);
 4739 context->xstorm_st_context.db_data_addr_lo =
 4740 U64_LO(fp->tx_prods_mapping);
Eilon Greenstein0626b892009-02-12 08:38:14 +00004741 context->xstorm_st_context.statistics_data = (cl_id |
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08004742 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004743 context->cstorm_st_context.sb_index_number =
Eilon Greenstein5c862842008-08-13 15:51:48 -07004744 C_SB_ETH_TX_CQ_INDEX;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004745 context->cstorm_st_context.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004746
 4747 context->xstorm_ag_context.cdu_reserved =
 4748 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
 4749 CDU_REGION_NUMBER_XCM_AG,
 4750 ETH_CONNECTION_TYPE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004751 }
 4752}
4753
/*
 * bnx2x_init_ind_table - program the RSS indirection table.
 *
 * No-op when RSS is disabled.  Otherwise fills the TSTORM indirection
 * table with client ids, spreading entries round-robin over the Rx
 * queues (base client id + i modulo num_rx_queues).
 */
 4754static void bnx2x_init_ind_table(struct bnx2x *bp)
 4755{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004756 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004757 int i;
 4758
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004759 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004760 return;
 4761
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004762 DP(NETIF_MSG_IFUP,
 4763 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004764 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004765 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004766 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Eilon Greenstein0626b892009-02-12 08:38:14 +00004767 bp->fp->cl_id + (i % bp->num_rx_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004768}
4769
Eliezer Tamir49d66772008-02-28 11:53:13 -08004770static void bnx2x_set_client_config(struct bnx2x *bp)
4771{
Eliezer Tamir49d66772008-02-28 11:53:13 -08004772 struct tstorm_eth_client_config tstorm_client = {0};
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004773 int port = BP_PORT(bp);
4774 int i;
Eliezer Tamir49d66772008-02-28 11:53:13 -08004775
Eilon Greensteine7799c52009-01-14 21:30:27 -08004776 tstorm_client.mtu = bp->dev->mtu;
Eliezer Tamir49d66772008-02-28 11:53:13 -08004777 tstorm_client.config_flags =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004778 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4779 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
Eliezer Tamir49d66772008-02-28 11:53:13 -08004780#ifdef BCM_VLAN
Eilon Greenstein0c6671b2009-01-14 21:26:51 -08004781 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
Eliezer Tamir49d66772008-02-28 11:53:13 -08004782 tstorm_client.config_flags |=
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08004783 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
Eliezer Tamir49d66772008-02-28 11:53:13 -08004784 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4785 }
4786#endif
Eliezer Tamir49d66772008-02-28 11:53:13 -08004787
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004788 if (bp->flags & TPA_ENABLE_FLAG) {
4789 tstorm_client.max_sges_for_packet =
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08004790 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004791 tstorm_client.max_sges_for_packet =
4792 ((tstorm_client.max_sges_for_packet +
4793 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4794 PAGES_PER_SGE_SHIFT;
4795
4796 tstorm_client.config_flags |=
4797 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4798 }
4799
Eliezer Tamir49d66772008-02-28 11:53:13 -08004800 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +00004801 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4802
Eliezer Tamir49d66772008-02-28 11:53:13 -08004803 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004804 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
Eliezer Tamir49d66772008-02-28 11:53:13 -08004805 ((u32 *)&tstorm_client)[0]);
4806 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004807 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
Eliezer Tamir49d66772008-02-28 11:53:13 -08004808 ((u32 *)&tstorm_client)[1]);
4809 }
4810
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004811 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4812 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
Eliezer Tamir49d66772008-02-28 11:53:13 -08004813}
4814
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004815static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4816{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004817 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004818 int mode = bp->rx_mode;
4819 int mask = (1 << BP_L_ID(bp));
4820 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004821 int i;
4822
Eilon Greenstein3196a882008-08-13 15:58:49 -07004823 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004824
4825 switch (mode) {
4826 case BNX2X_RX_MODE_NONE: /* no Rx */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004827 tstorm_mac_filter.ucast_drop_all = mask;
4828 tstorm_mac_filter.mcast_drop_all = mask;
4829 tstorm_mac_filter.bcast_drop_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004830 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004831
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004832 case BNX2X_RX_MODE_NORMAL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004833 tstorm_mac_filter.bcast_accept_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004834 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004835
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004836 case BNX2X_RX_MODE_ALLMULTI:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004837 tstorm_mac_filter.mcast_accept_all = mask;
4838 tstorm_mac_filter.bcast_accept_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004839 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004840
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004841 case BNX2X_RX_MODE_PROMISC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004842 tstorm_mac_filter.ucast_accept_all = mask;
4843 tstorm_mac_filter.mcast_accept_all = mask;
4844 tstorm_mac_filter.bcast_accept_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004845 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004846
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004847 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004848 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4849 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004850 }
4851
4852 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4853 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004854 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004855 ((u32 *)&tstorm_mac_filter)[i]);
4856
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004857/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004858 ((u32 *)&tstorm_mac_filter)[i]); */
4859 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004860
Eliezer Tamir49d66772008-02-28 11:53:13 -08004861 if (mode != BNX2X_RX_MODE_NONE)
4862 bnx2x_set_client_config(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004863}
4864
Eilon Greenstein471de712008-08-13 15:49:35 -07004865static void bnx2x_init_internal_common(struct bnx2x *bp)
4866{
4867 int i;
4868
Yitchak Gertner3cdf1db2008-08-25 15:24:21 -07004869 if (bp->flags & TPA_ENABLE_FLAG) {
4870 struct tstorm_eth_tpa_exist tpa = {0};
4871
4872 tpa.tpa_exist = 1;
4873
4874 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4875 ((u32 *)&tpa)[0]);
4876 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4877 ((u32 *)&tpa)[1]);
4878 }
4879
Eilon Greenstein471de712008-08-13 15:49:35 -07004880 /* Zero this manually as its initialization is
4881 currently missing in the initTool */
4882 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4883 REG_WR(bp, BAR_USTRORM_INTMEM +
4884 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4885}
4886
/*
 * bnx2x_init_internal_port - per-port internal-memory initialization.
 *
 * Writes the baud-rate/timeout constant BNX2X_BTR into the HC_BTR slot
 * of all four storms for this port.
 */
 4887static void bnx2x_init_internal_port(struct bnx2x *bp)
 4888{
 4889 int port = BP_PORT(bp);
 4890
 4891 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
 4892 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
 4893 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
 4894 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
 4895}
4896
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00004897/* Calculates the sum of vn_min_rates.
4898 It's needed for further normalizing of the min_rates.
4899 Returns:
4900 sum of vn_min_rates.
4901 or
4902 0 - if all the min_rates are 0.
4903 In the later case fainess algorithm should be deactivated.
4904 If not all min_rates are zero then those that are zeroes will be set to 1.
4905 */
4906static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4907{
4908 int all_zero = 1;
4909 int port = BP_PORT(bp);
4910 int vn;
4911
4912 bp->vn_weight_sum = 0;
4913 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4914 int func = 2*vn + port;
4915 u32 vn_cfg =
4916 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4917 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4918 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4919
4920 /* Skip hidden vns */
4921 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4922 continue;
4923
4924 /* If min rate is zero - set it to 1 */
4925 if (!vn_min_rate)
4926 vn_min_rate = DEF_MIN_RATE;
4927 else
4928 all_zero = 0;
4929
4930 bp->vn_weight_sum += vn_min_rate;
4931 }
4932
4933 /* ... only if all min rates are zeros - disable fairness */
4934 if (all_zero)
4935 bp->vn_weight_sum = 0;
4936}
4937
Eilon Greenstein471de712008-08-13 15:49:35 -07004938static void bnx2x_init_internal_func(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004939{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004940 struct tstorm_eth_function_common_config tstorm_config = {0};
4941 struct stats_indication_flags stats_flags = {0};
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004942 int port = BP_PORT(bp);
4943 int func = BP_FUNC(bp);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004944 int i, j;
4945 u32 offset;
Eilon Greenstein471de712008-08-13 15:49:35 -07004946 u16 max_agg_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004947
4948 if (is_multi(bp)) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004949 tstorm_config.config_flags = MULTI_FLAGS(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004950 tstorm_config.rss_result_mask = MULTI_MASK;
4951 }
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08004952 if (IS_E1HMF(bp))
4953 tstorm_config.config_flags |=
4954 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004955
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004956 tstorm_config.leading_client_id = BP_L_ID(bp);
4957
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004958 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004959 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004960 (*(u32 *)&tstorm_config));
4961
Eliezer Tamirc14423f2008-02-28 11:49:42 -08004962 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004963 bnx2x_set_storm_rx_mode(bp);
4964
Eilon Greensteinde832a52009-02-12 08:36:33 +00004965 for_each_queue(bp, i) {
4966 u8 cl_id = bp->fp[i].cl_id;
4967
4968 /* reset xstorm per client statistics */
4969 offset = BAR_XSTRORM_INTMEM +
4970 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4971 for (j = 0;
4972 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4973 REG_WR(bp, offset + j*4, 0);
4974
4975 /* reset tstorm per client statistics */
4976 offset = BAR_TSTRORM_INTMEM +
4977 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4978 for (j = 0;
4979 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4980 REG_WR(bp, offset + j*4, 0);
4981
4982 /* reset ustorm per client statistics */
4983 offset = BAR_USTRORM_INTMEM +
4984 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4985 for (j = 0;
4986 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4987 REG_WR(bp, offset + j*4, 0);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004988 }
4989
4990 /* Init statistics related context */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004991 stats_flags.collect_eth = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004992
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004993 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004994 ((u32 *)&stats_flags)[0]);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004995 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004996 ((u32 *)&stats_flags)[1]);
4997
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004998 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004999 ((u32 *)&stats_flags)[0]);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07005000 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005001 ((u32 *)&stats_flags)[1]);
5002
Eilon Greensteinde832a52009-02-12 08:36:33 +00005003 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5004 ((u32 *)&stats_flags)[0]);
5005 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5006 ((u32 *)&stats_flags)[1]);
5007
Yitchak Gertner66e855f2008-08-13 15:49:05 -07005008 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005009 ((u32 *)&stats_flags)[0]);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07005010 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005011 ((u32 *)&stats_flags)[1]);
5012
Yitchak Gertner66e855f2008-08-13 15:49:05 -07005013 REG_WR(bp, BAR_XSTRORM_INTMEM +
5014 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5015 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5016 REG_WR(bp, BAR_XSTRORM_INTMEM +
5017 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5018 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5019
5020 REG_WR(bp, BAR_TSTRORM_INTMEM +
5021 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5022 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5023 REG_WR(bp, BAR_TSTRORM_INTMEM +
5024 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5025 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005026
Eilon Greensteinde832a52009-02-12 08:36:33 +00005027 REG_WR(bp, BAR_USTRORM_INTMEM +
5028 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5029 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5030 REG_WR(bp, BAR_USTRORM_INTMEM +
5031 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5032 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5033
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005034 if (CHIP_IS_E1H(bp)) {
5035 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5036 IS_E1HMF(bp));
5037 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5038 IS_E1HMF(bp));
5039 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5040 IS_E1HMF(bp));
5041 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5042 IS_E1HMF(bp));
5043
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005044 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5045 bp->e1hov);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005046 }
5047
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08005048 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5049 max_agg_size =
5050 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5051 SGE_PAGE_SIZE * PAGES_PER_SGE),
5052 (u32)0xffff);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005053 for_each_rx_queue(bp, i) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005054 struct bnx2x_fastpath *fp = &bp->fp[i];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005055
5056 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein0626b892009-02-12 08:38:14 +00005057 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005058 U64_LO(fp->rx_comp_mapping));
5059 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein0626b892009-02-12 08:38:14 +00005060 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005061 U64_HI(fp->rx_comp_mapping));
5062
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005063 REG_WR16(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein0626b892009-02-12 08:38:14 +00005064 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005065 max_agg_size);
5066 }
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00005067
Eilon Greenstein1c063282009-02-12 08:36:43 +00005068 /* dropless flow control */
5069 if (CHIP_IS_E1H(bp)) {
5070 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5071
5072 rx_pause.bd_thr_low = 250;
5073 rx_pause.cqe_thr_low = 250;
5074 rx_pause.cos = 1;
5075 rx_pause.sge_thr_low = 0;
5076 rx_pause.bd_thr_high = 350;
5077 rx_pause.cqe_thr_high = 350;
5078 rx_pause.sge_thr_high = 0;
5079
5080 for_each_rx_queue(bp, i) {
5081 struct bnx2x_fastpath *fp = &bp->fp[i];
5082
5083 if (!fp->disable_tpa) {
5084 rx_pause.sge_thr_low = 150;
5085 rx_pause.sge_thr_high = 250;
5086 }
5087
5088
5089 offset = BAR_USTRORM_INTMEM +
5090 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5091 fp->cl_id);
5092 for (j = 0;
5093 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5094 j++)
5095 REG_WR(bp, offset + j*4,
5096 ((u32 *)&rx_pause)[j]);
5097 }
5098 }
5099
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00005100 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5101
5102 /* Init rate shaping and fairness contexts */
5103 if (IS_E1HMF(bp)) {
5104 int vn;
5105
5106 /* During init there is no active link
5107 Until link is up, set link rate to 10Gbps */
5108 bp->link_vars.line_speed = SPEED_10000;
5109 bnx2x_init_port_minmax(bp);
5110
5111 bnx2x_calc_vn_weight_sum(bp);
5112
5113 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5114 bnx2x_init_vn_minmax(bp, 2*vn + port);
5115
5116 /* Enable rate shaping and fairness */
5117 bp->cmng.flags.cmng_enables =
5118 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5119 if (bp->vn_weight_sum)
5120 bp->cmng.flags.cmng_enables |=
5121 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5122 else
5123 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5124 " fairness will be disabled\n");
5125 } else {
5126 /* rate shaping and fairness are disabled */
5127 DP(NETIF_MSG_IFUP,
5128 "single function mode minmax will be disabled\n");
5129 }
5130
5131
5132 /* Store it to internal memory */
5133 if (bp->port.pmf)
5134 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5135 REG_WR(bp, BAR_XSTRORM_INTMEM +
5136 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5137 ((u32 *)(&bp->cmng))[i]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005138}
5139
Eilon Greenstein471de712008-08-13 15:49:35 -07005140static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5141{
5142 switch (load_code) {
5143 case FW_MSG_CODE_DRV_LOAD_COMMON:
5144 bnx2x_init_internal_common(bp);
5145 /* no break */
5146
5147 case FW_MSG_CODE_DRV_LOAD_PORT:
5148 bnx2x_init_internal_port(bp);
5149 /* no break */
5150
5151 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5152 bnx2x_init_internal_func(bp);
5153 break;
5154
5155 default:
5156 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5157 break;
5158 }
5159}
5160
/* Top-level per-function NIC init: set up every fastpath queue and its
 * status block, then the default status block, rings, context and the
 * internal (STORM) memories, and finally enable interrupts.
 * @load_code: MCP load response; selects how much internal memory
 *             bnx2x_init_internal() initializes.
 * NOTE(review): must be called with interrupts still disabled
 * (bp->intr_sem held non-zero) — this function releases it.
 */
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		/* client id and status block id are both derived from the
		   function's base (leading) client id */
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 — SPIO 5 carries the fan failure indication
	   (see bnx2x_setup_fan_failure_detection), which may already be
	   asserted by the time interrupts are enabled */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
5211
5212/* end of nic init */
5213
5214/*
5215 * gzip service functions
5216 */
5217
5218static int bnx2x_gunzip_init(struct bnx2x *bp)
5219{
5220 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5221 &bp->gunzip_mapping);
5222 if (bp->gunzip_buf == NULL)
5223 goto gunzip_nomem1;
5224
5225 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5226 if (bp->strm == NULL)
5227 goto gunzip_nomem2;
5228
5229 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5230 GFP_KERNEL);
5231 if (bp->strm->workspace == NULL)
5232 goto gunzip_nomem3;
5233
5234 return 0;
5235
5236gunzip_nomem3:
5237 kfree(bp->strm);
5238 bp->strm = NULL;
5239
5240gunzip_nomem2:
5241 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5242 bp->gunzip_mapping);
5243 bp->gunzip_buf = NULL;
5244
5245gunzip_nomem1:
5246 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005247 " un-compression\n", bp->dev->name);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005248 return -ENOMEM;
5249}
5250
5251static void bnx2x_gunzip_end(struct bnx2x *bp)
5252{
5253 kfree(bp->strm->workspace);
5254
5255 kfree(bp->strm);
5256 bp->strm = NULL;
5257
5258 if (bp->gunzip_buf) {
5259 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5260 bp->gunzip_mapping);
5261 bp->gunzip_buf = NULL;
5262 }
5263}
5264
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005265static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005266{
5267 int n, rc;
5268
5269 /* check gzip header */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005270 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5271 BNX2X_ERR("Bad gzip header\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005272 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005273 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005274
5275 n = 10;
5276
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005277#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005278
5279 if (zbuf[3] & FNAME)
5280 while ((zbuf[n++] != 0) && (n < len));
5281
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005282 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005283 bp->strm->avail_in = len - n;
5284 bp->strm->next_out = bp->gunzip_buf;
5285 bp->strm->avail_out = FW_BUF_SIZE;
5286
5287 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5288 if (rc != Z_OK)
5289 return rc;
5290
5291 rc = zlib_inflate(bp->strm, Z_FINISH);
5292 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5293 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5294 bp->dev->name, bp->strm->msg);
5295
5296 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5297 if (bp->gunzip_outlen & 0x3)
5298 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5299 " gunzip_outlen (%d) not aligned\n",
5300 bp->dev->name, bp->gunzip_outlen);
5301 bp->gunzip_outlen >>= 2;
5302
5303 zlib_inflateEnd(bp->strm);
5304
5305 if (rc == Z_STREAM_END)
5306 return 0;
5307
5308 return rc;
5309}
5310
5311/* nic load/unload */
5312
5313/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005314 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005315 */
5316
5317/* send a NIG loopback debug packet */
5318static void bnx2x_lb_pckt(struct bnx2x *bp)
5319{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005320 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005321
5322 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005323 wb_write[0] = 0x55555555;
5324 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005325 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005326 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005327
5328 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005329 wb_write[0] = 0x09000000;
5330 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005331 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005332 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005333}
5334
5335/* some of the internal memories
5336 * are not directly readable from the driver
5337 * to test them we send debug packets
5338 */
/* Self-test of internal memories by pushing debug loopback packets
 * through the BRB/PRS/NIG path and checking the packet counters.
 * Returns 0 on success, or a negative step number (-1..-4) identifying
 * which stage timed out/failed.  Leaves BRB/PRS/NIG reset and
 * re-initialized, with parser neighbor inputs re-enabled.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;		/* timeout multiplier for slow platforms */
	int count, i;
	u32 val = 0;

	/* FPGA and emulation platforms run far slower than silicon -
	   scale all poll budgets accordingly */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		/* counter is read via DMAE into the slowpath buffer */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
5486
/* Unmask the per-block attention (error) interrupts: writing 0 to a
 * block's INT_MASK register enables all of its attention bits.  The
 * commented-out SEM/MISC writes and the explicit non-zero masks
 * (PXP2, PBF) deliberately leave specific bits masked.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	/* PXP2 keeps a platform-dependent set of bits masked */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
5525
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005526
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00005527static void bnx2x_reset_common(struct bnx2x *bp)
5528{
5529 /* reset_common */
5530 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5531 0xd3ffff7f);
5532 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5533}
5534
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00005535
5536static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5537{
5538 u32 val;
5539 u8 port;
5540 u8 is_required = 0;
5541
5542 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5543 SHARED_HW_CFG_FAN_FAILURE_MASK;
5544
5545 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5546 is_required = 1;
5547
5548 /*
5549 * The fan failure mechanism is usually related to the PHY type since
5550 * the power consumption of the board is affected by the PHY. Currently,
5551 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5552 */
5553 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5554 for (port = PORT_0; port < PORT_MAX; port++) {
5555 u32 phy_type =
5556 SHMEM_RD(bp, dev_info.port_hw_config[port].
5557 external_phy_config) &
5558 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5559 is_required |=
5560 ((phy_type ==
5561 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5562 (phy_type ==
5563 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5564 }
5565
5566 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5567
5568 if (is_required == 0)
5569 return;
5570
5571 /* Fan failure is indicated by SPIO 5 */
5572 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5573 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5574
5575 /* set to active low mode */
5576 val = REG_RD(bp, MISC_REG_SPIO_INT);
5577 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5578 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5579 REG_WR(bp, MISC_REG_SPIO_INT, val);
5580
5581 /* enable interrupt to signal the IGU */
5582 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5583 val |= (1 << MISC_REGISTERS_SPIO_5);
5584 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5585}
5586
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005587static int bnx2x_init_common(struct bnx2x *bp)
5588{
5589 u32 val, i;
5590
5591 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5592
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00005593 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005594 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5595 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5596
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005597 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005598 if (CHIP_IS_E1H(bp))
5599 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5600
5601 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5602 msleep(30);
5603 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5604
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005605 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005606 if (CHIP_IS_E1(bp)) {
5607 /* enable HW interrupt from PXP on USDM overflow
5608 bit 16 on INT_MASK_0 */
5609 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005610 }
5611
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005612 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005613 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005614
5615#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005616 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5617 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5618 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5619 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5620 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00005621 /* make sure this value is 0 */
5622 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005623
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005624/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5625 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5626 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5627 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5628 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005629#endif
5630
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005631 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005632#ifdef BCM_ISCSI
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005633 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5634 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5635 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005636#endif
5637
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005638 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5639 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005640
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005641 /* let the HW do it's magic ... */
5642 msleep(100);
5643 /* finish PXP init */
5644 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5645 if (val != 1) {
5646 BNX2X_ERR("PXP2 CFG failed\n");
5647 return -EBUSY;
5648 }
5649 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5650 if (val != 1) {
5651 BNX2X_ERR("PXP2 RD_INIT failed\n");
5652 return -EBUSY;
5653 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005654
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005655 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5656 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005657
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005658 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005659
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005660 /* clean the DMAE memory */
5661 bp->dmae_ready = 1;
5662 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005663
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005664 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5665 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5666 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5667 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005668
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005669 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5670 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5671 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5672 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5673
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005674 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005675 /* soft reset pulse */
5676 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5677 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005678
5679#ifdef BCM_ISCSI
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005680 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005681#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005682
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005683 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005684 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5685 if (!CHIP_REV_IS_SLOW(bp)) {
5686 /* enable hw interrupt from doorbell Q */
5687 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5688 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005689
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005690 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5691 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005692 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Eilon Greenstein3196a882008-08-13 15:58:49 -07005693 /* set NIC mode */
5694 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005695 if (CHIP_IS_E1H(bp))
5696 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005697
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005698 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5699 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5700 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5701 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005702
Eilon Greenstein490c3c92009-03-02 07:59:52 +00005703 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5704 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5705 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5706 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005707
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005708 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5709 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5710 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5711 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005712
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005713 /* sync semi rtc */
5714 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5715 0x80000000);
5716 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5717 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005718
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005719 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5720 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5721 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005722
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005723 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5724 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5725 REG_WR(bp, i, 0xc0cac01a);
5726 /* TODO: replace with something meaningful */
5727 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005728 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005729 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005730
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005731 if (sizeof(union cdu_context) != 1024)
5732 /* we currently assume that a context is 1024 bytes */
5733 printk(KERN_ALERT PFX "please adjust the size of"
5734 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005735
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005736 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005737 val = (4 << 24) + (0 << 12) + 1024;
5738 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5739 if (CHIP_IS_E1(bp)) {
5740 /* !!! fix pxp client crdit until excel update */
5741 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5742 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5743 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005744
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005745 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005746 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005747 /* enable context validation interrupt from CFC */
5748 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5749
5750 /* set the thresholds to prevent CFC/CDU race */
5751 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005752
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005753 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5754 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005755
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005756 /* PXPCS COMMON comes here */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005757 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005758 /* Reset PCIE errors for debug */
5759 REG_WR(bp, 0x2814, 0xffffffff);
5760 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005761
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005762 /* EMAC0 COMMON comes here */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005763 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005764 /* EMAC1 COMMON comes here */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005765 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005766 /* DBU COMMON comes here */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005767 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005768 /* DBG COMMON comes here */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005769 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005770
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005771 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005772 if (CHIP_IS_E1H(bp)) {
5773 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5774 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5775 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005776
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005777 if (CHIP_REV_IS_SLOW(bp))
5778 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005779
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005780 /* finish CFC init */
5781 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5782 if (val != 1) {
5783 BNX2X_ERR("CFC LL_INIT failed\n");
5784 return -EBUSY;
5785 }
5786 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5787 if (val != 1) {
5788 BNX2X_ERR("CFC AC_INIT failed\n");
5789 return -EBUSY;
5790 }
5791 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5792 if (val != 1) {
5793 BNX2X_ERR("CFC CAM_INIT failed\n");
5794 return -EBUSY;
5795 }
5796 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005797
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005798 /* read NIG statistic
5799 to see if this is our first up since powerup */
5800 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5801 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005802
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005803 /* do internal memory self test */
5804 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5805 BNX2X_ERR("internal mem self test failed\n");
5806 return -EBUSY;
5807 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005808
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00005809 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
Eilon Greenstein46c6a672009-02-12 08:36:58 +00005810 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5811 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5812 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5813 bp->port.need_hw_lock = 1;
5814 break;
5815
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005816 default:
5817 break;
5818 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08005819
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00005820 bnx2x_setup_fan_failure_detection(bp);
5821
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005822 /* clear PXP2 attentions */
5823 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005824
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005825 enable_blocks_attention(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005826
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005827 if (!BP_NOMCP(bp)) {
5828 bnx2x_acquire_phy_lock(bp);
5829 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5830 bnx2x_release_phy_lock(bp);
5831 } else
5832 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5833
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005834 return 0;
5835}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005836
/* bnx2x_init_port - per-port hardware initialization.
 *
 * Runs once per physical port (stage PORT0_STAGE/PORT1_STAGE) after the
 * COMMON stage has completed.  Walks the HW blocks in their required
 * bring-up order, sets the BRB pause thresholds according to MTU and
 * port-mode, configures PBF credits, and hooks the external-PHY
 * attention sources (GPIO3 / SPIO5) into the AEU groups.
 *
 * Returns 0 (no failure paths at present).
 *
 * NOTE(review): the block-init sequence below is order-sensitive per the
 * chip init spec - do not reorder the bnx2x_init_block() calls.
 */
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	/* mask all port interrupts in the NIG until init completes */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	/* Port PXP2 comes here */
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
#ifdef BCM_ISCSI
	/* ILT lines for the iSCSI timers/QM/searcher tables
	 * (NOTE(review): 'i', 'wb_write' and 'func' come from scope outside
	 * this chunk; this #ifdef branch appears stale - confirm it builds)
	 * Port0 1
	 * Port1 385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0 2
	 * Port1 386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0 3
	 * Port1 387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
#endif
	/* Port DQ comes here */
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		/* BRB pause low threshold in 256-byte units, scaled by
		   port mode (multi-function / single-port) and MTU */
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56; /* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);


	/* Port PRS comes here */
	bnx2x_init_block(bp, PRS_BLOCK, init_stage);
	/* Port TSDM comes here */
	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	/* Port CSDM comes here */
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	/* Port USDM comes here */
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	/* Port XSDM comes here */
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	/* Port UPB comes here */
	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	/* Port XPB comes here */
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes: pulse PBF init to latch the new credit values */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	/* Port CFC comes here */
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		/* E1: clear the interrupt edge registers before HC init */
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 * bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	/* Port EMAC0 comes here */
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	/* Port EMAC1 comes here */
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	/* Port DBU comes here */
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	/* Port DBG comes here */
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* support pause requests from USDM, TSDM and BRB */
		REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);

		{
			/* disable link-level flow control, enable
			   legacy PAUSE instead */
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	/* Port MCP comes here */
	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	/* Port DMAE comes here */
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	/* route external-PHY attention sources into the AEU */
	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}
6065
/* ILT (internal lookup table) layout helpers.
 *
 * Fix: all function-like macro arguments are now fully parenthesized
 * (they previously expanded raw, e.g. "(u64)x >> 12" and "| x", which
 * mis-evaluates for any non-trivial argument expression).  All current
 * call sites pass simple variables, so generated code is unchanged.
 */
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	((func) * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)(x) >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | (x))
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | (f))

#define CNIC_ILT_LINES	0
6079
6080static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6081{
6082 int reg;
6083
6084 if (CHIP_IS_E1H(bp))
6085 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6086 else /* E1 */
6087 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6088
6089 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6090}
6091
/* bnx2x_init_func - per-PCI-function hardware initialization.
 *
 * Runs for every function load (stage FUNC0_STAGE + func): enables MSI
 * reconfiguration in the HC, programs this function's ILT range for the
 * CDU context, and (on E1H) initializes the per-function CM blocks and
 * LLH VLAN filtering.  Always returns 0.
 */
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	/* map this function's slowpath context into its ILT slice */
	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));


	if (CHIP_IS_E1H(bp)) {
		/* NOTE(review): cm_blocks[] and the 9-block count are
		   defined outside this chunk - confirm they stay in sync */
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_blocks[i], FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
6142
/* bnx2x_init_hw - top-level hardware init dispatcher.
 *
 * @load_code: MCP's answer to our load request; selects how much of the
 *             chip this driver instance must bring up.
 *
 * The switch falls through deliberately: COMMON init implies PORT init
 * implies FUNCTION init.  DMA (dmae_ready) only becomes usable after
 * the COMMON stage has run (here or by another function).
 *
 * Returns 0 on success or the negative error from the failing stage.
 */
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		/* seed the driver pulse sequence from the shared memory
		   mailbox and fetch the per-function statistics address */
		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	} else
		bp->func_stx = 0;

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
6202
/* send the MCP a request, block until there is a reply */
/*
 * Writes (command | seq) to the driver mailbox in shared memory and
 * polls the firmware mailbox until the echoed sequence number matches,
 * sleeping 'delay' ms between reads (up to ~2 s of polling).
 *
 * Returns the FW reply masked to FW_MSG_CODE_MASK, or 0 if the firmware
 * never answered (logged as a FW bug with a firmware dump).
 */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	/* slow emulation/FPGA chips need a longer poll interval */
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do it's magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 second (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
6240
6241static void bnx2x_free_mem(struct bnx2x *bp)
6242{
6243
6244#define BNX2X_PCI_FREE(x, y, size) \
6245 do { \
6246 if (x) { \
6247 pci_free_consistent(bp->pdev, size, x, y); \
6248 x = NULL; \
6249 y = 0; \
6250 } \
6251 } while (0)
6252
6253#define BNX2X_FREE(x) \
6254 do { \
6255 if (x) { \
6256 vfree(x); \
6257 x = NULL; \
6258 } \
6259 } while (0)
6260
6261 int i;
6262
6263 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006264 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006265 for_each_queue(bp, i) {
6266
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006267 /* status blocks */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006268 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6269 bnx2x_fp(bp, i, status_blk_mapping),
6270 sizeof(struct host_status_block) +
6271 sizeof(struct eth_tx_db_data));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006272 }
6273 /* Rx */
6274 for_each_rx_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006275
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006276 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006277 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6278 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6279 bnx2x_fp(bp, i, rx_desc_mapping),
6280 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6281
6282 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6283 bnx2x_fp(bp, i, rx_comp_mapping),
6284 sizeof(struct eth_fast_path_rx_cqe) *
6285 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006286
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006287 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07006288 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006289 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6290 bnx2x_fp(bp, i, rx_sge_mapping),
6291 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6292 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006293 /* Tx */
6294 for_each_tx_queue(bp, i) {
6295
6296 /* fastpath tx rings: tx_buf tx_desc */
6297 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6298 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6299 bnx2x_fp(bp, i, tx_desc_mapping),
6300 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6301 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006302 /* end of fastpath */
6303
6304 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006305 sizeof(struct host_def_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006306
6307 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006308 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006309
6310#ifdef BCM_ISCSI
6311 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6312 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6313 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6314 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6315#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006316 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006317
6318#undef BNX2X_PCI_FREE
6319#undef BNX2X_KFREE
6320}
6321
/* bnx2x_alloc_mem - allocate all driver memory before bringing the
 * device up: per-queue status blocks and rings, the default status
 * block, slowpath area and SPQ ring (plus iSCSI searcher/timer/QM
 * tables when BCM_ISCSI is enabled).
 *
 * Coherent DMA memory goes through BNX2X_PCI_ALLOC, software shadow
 * rings through BNX2X_ALLOC (vmalloc); both zero the memory and jump
 * to alloc_mem_err on failure, which frees everything allocated so far.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.
 */
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks (Tx doorbell data lives right behind the
		   status block in the same coherent allocation) */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* hw_tx_prods shares the status-block allocation above */
		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		/* NOTE(review): the +3 offset below is unaligned and looks
		   suspicious - confirm against the searcher table layout */
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		* (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6441
6442static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6443{
6444 int i;
6445
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006446 for_each_tx_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006447 struct bnx2x_fastpath *fp = &bp->fp[i];
6448
6449 u16 bd_cons = fp->tx_bd_cons;
6450 u16 sw_prod = fp->tx_pkt_prod;
6451 u16 sw_cons = fp->tx_pkt_cons;
6452
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006453 while (sw_cons != sw_prod) {
6454 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6455 sw_cons++;
6456 }
6457 }
6458}
6459
/* bnx2x_free_rx_skbs - free every skb still posted on the Rx rings.
 *
 * For each Rx queue: unmap the DMA buffer of every populated BD, drop
 * the skb, and (when TPA is enabled for the queue) release the TPA
 * aggregation pool sized per chip revision.
 */
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			/* BD slot was never filled (or already freed) */
			if (skb == NULL)
				continue;

			/* unmap before freeing - device no longer owns it */
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}
6487
/* bnx2x_free_skbs - release all driver-held skbs (Tx then Rx). */
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
6493
/* bnx2x_free_msix_irqs - release all MSI-X vectors.
 *
 * Vector 0 is the slowpath interrupt (devid bp->dev); fastpath queue i
 * owns vector i+1 (devid &bp->fp[i]), hence offset = 1.
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	/* slowpath vector first */
	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}
6510
6511static void bnx2x_free_irq(struct bnx2x *bp)
6512{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006513 if (bp->flags & USING_MSIX_FLAG) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006514 bnx2x_free_msix_irqs(bp);
6515 pci_disable_msix(bp->pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006516 bp->flags &= ~USING_MSIX_FLAG;
6517
Eilon Greenstein8badd272009-02-12 08:36:15 +00006518 } else if (bp->flags & USING_MSI_FLAG) {
6519 free_irq(bp->pdev->irq, bp->dev);
6520 pci_disable_msi(bp->pdev);
6521 bp->flags &= ~USING_MSI_FLAG;
6522
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006523 } else
6524 free_irq(bp->pdev->irq, bp->dev);
6525}
6526
6527static int bnx2x_enable_msix(struct bnx2x *bp)
6528{
Eilon Greenstein8badd272009-02-12 08:36:15 +00006529 int i, rc, offset = 1;
6530 int igu_vec = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006531
Eilon Greenstein8badd272009-02-12 08:36:15 +00006532 bp->msix_table[0].entry = igu_vec;
6533 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006534
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006535 for_each_queue(bp, i) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00006536 igu_vec = BP_L_ID(bp) + offset + i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006537 bp->msix_table[i + offset].entry = igu_vec;
6538 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6539 "(fastpath #%u)\n", i + offset, igu_vec, i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006540 }
6541
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006542 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006543 BNX2X_NUM_QUEUES(bp) + offset);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006544 if (rc) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00006545 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6546 return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006547 }
Eilon Greenstein8badd272009-02-12 08:36:15 +00006548
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006549 bp->flags |= USING_MSIX_FLAG;
6550
6551 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006552}
6553
/*
 * Request one IRQ per MSI-X vector: the slowpath vector first, then
 * one per fastpath queue.  On any failure every vector requested so
 * far is released and -EBUSY is returned; on success the fastpath
 * states are set to BNX2X_FP_STATE_IRQ and the assigned vectors are
 * logged.
 */
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	/* slowpath interrupt on vector 0 */
	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* per-queue IRQ name, e.g. "eth0.fp3" */
		sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			/* releases the sp vector and all fp vectors
			 * requested so far */
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	if (is_multi(bp))
		printk(KERN_INFO PFX
		       "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset].vector,
		       bp->msix_table[offset + i - 1].vector);
	else
		printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
		       bp->dev->name, bp->msix_table[0].vector,
		       bp->msix_table[offset + i - 1].vector);

	return 0;
}
6594
Eilon Greenstein8badd272009-02-12 08:36:15 +00006595static int bnx2x_enable_msi(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006596{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006597 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006598
Eilon Greenstein8badd272009-02-12 08:36:15 +00006599 rc = pci_enable_msi(bp->pdev);
6600 if (rc) {
6601 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6602 return -1;
6603 }
6604 bp->flags |= USING_MSI_FLAG;
6605
6606 return 0;
6607}
6608
6609static int bnx2x_req_irq(struct bnx2x *bp)
6610{
6611 unsigned long flags;
6612 int rc;
6613
6614 if (bp->flags & USING_MSI_FLAG)
6615 flags = 0;
6616 else
6617 flags = IRQF_SHARED;
6618
6619 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006620 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006621 if (!rc)
6622 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6623
6624 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006625}
6626
Yitchak Gertner65abd742008-08-25 15:26:24 -07006627static void bnx2x_napi_enable(struct bnx2x *bp)
6628{
6629 int i;
6630
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006631 for_each_rx_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07006632 napi_enable(&bnx2x_fp(bp, i, napi));
6633}
6634
6635static void bnx2x_napi_disable(struct bnx2x *bp)
6636{
6637 int i;
6638
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006639 for_each_rx_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07006640 napi_disable(&bnx2x_fp(bp, i, napi));
6641}
6642
/*
 * Restart the data path after bnx2x_netif_stop(): drop one reference
 * on the interrupt semaphore and, once it reaches zero, re-enable
 * NAPI and interrupts and wake the Tx queues.
 */
static void bnx2x_netif_start(struct bnx2x *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			/* Tx queues are only woken in the OPEN state */
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}
6654
/*
 * Quiesce the data path: synchronize/disable interrupts (disable_hw
 * is forwarded to bnx2x_int_disable_sync()), stop NAPI polling, then
 * disable the Tx queues.
 */
static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
}
6662
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006663/*
6664 * Init service functions
6665 */
6666
/*
 * Program (set != 0) or invalidate (set == 0) the E1 CAM entries for
 * this port: entry 0 holds the device's primary MAC, entry 1 the
 * broadcast address.  The table is handed to the firmware via a
 * SET_MAC ramrod; completion is asynchronous.
 */
static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC - dev_addr bytes packed as three swab16'd words */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast (ff:ff:ff:ff:ff:ff) */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	/* post the SET_MAC ramrod pointing at the table just built */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
6719
/*
 * Program (set != 0) or clear (set == 0) the E1H CAM entry holding
 * this function's primary MAC address and post it to the firmware via
 * a SET_MAC ramrod.  Setting is refused unless the device is in the
 * OPEN state.
 */
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC - dev_addr bytes packed as three swab16'd words */
	config->config_table[0].msb_mac_addr =
		swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
		swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
		swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
			MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
6765
/*
 * Wait (up to ~5 seconds) for *state_p to reach 'state', as updated
 * asynchronously by bnx2x_sp_event().  When 'poll' is set, ramrod
 * completions are actively reaped by calling bnx2x_rx_int() instead
 * of relying on interrupts.  Returns 0 on success, -EBUSY on timeout.
 * Must be called from sleepable context (msleep).
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
6807
/*
 * Bring up the leading (default) connection: reset its IGU state,
 * post the PORT_SETUP ramrod and wait for bp->state to move to OPEN.
 * Returns 0 on success or the bnx2x_wait_ramrod() error code.
 */
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}
6823
/*
 * Bring up a non-default connection 'index': reset its IGU state,
 * post the CLIENT_SETUP ramrod and wait for the fastpath state to
 * move from OPENING to OPEN.  Returns 0 on success or the
 * bnx2x_wait_ramrod() error code.
 */
static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}
6840
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006841static int bnx2x_poll(struct napi_struct *napi, int budget);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006842
/*
 * Decide the interrupt mode and queue counts based on the 'int_mode'
 * module parameter: INTx/MSI force a single queue pair; the default
 * (MSI-X) path sizes the queues from the online CPU count (capped at
 * BNX2X_MAX_QUEUES) when RSS is in regular mode, then tries to enable
 * MSI-X, falling back to one queue if that fails.
 */
static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   " number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X  set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}
6887
6888static void bnx2x_set_rx_mode(struct net_device *dev);
6889
6890/* must be called with rtnl_lock */
/*
 * Full NIC bring-up: allocate memory, attach NAPI, request IRQs
 * (MSI-X/MSI/INTx per bnx2x_set_int_mode()), negotiate the load type
 * with the MCP (or emulate it via load_count[] when there is no MCP),
 * init HW and firmware state, open the leading and non-default
 * connections, program the MAC and start the data path according to
 * 'load_mode' (LOAD_NORMAL/LOAD_OPEN/LOAD_DIAG).
 *
 * Must be called with rtnl_lock held.  Returns 0 on success or a
 * negative errno; errors unwind through the load_error* labels in
 * reverse order of acquisition.
 */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* pick interrupt mode and queue counts before sizing memory */
	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
			((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	/* reset per-queue poll statistics */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* NOTE(review): rc is still 0 here, so the -ENOMEM test
		 * can never fail - looks like a leftover; only the
		 * int_mode check is effective.  Confirm before changing. */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		/* no MCP: emulate the load-type decision locally */
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	/* first function on the port becomes the port management function */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	/* open the remaining connections (skipped if mf disabled us) */
	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);


	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
7099
/*
 * Tear down a non-default connection 'index': post the HALT ramrod,
 * wait for HALTED, then post CFC_DEL and wait for CLOSED.  Returns 0
 * on success or the bnx2x_wait_ramrod() error code on timeout.
 */
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}
7123
/*
 * Tear down the leading connection: HALT ramrod (waited via the
 * fastpath state), then PORT_DELETE, whose completion is detected by
 * watching the default status block's slowpath producer advance.
 * Returns 0 on success or -EBUSY on timeout; the caller resets the
 * chip afterwards either way.
 */
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	/* snapshot the producer so we can detect the PORT_DEL completion */
	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
7173
/* Reset the per-function HW state: mask the IGU leading/trailing edge
 * interrupt registers for this port and zero this function's ILT range.
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT (internal lookup table) entries owned by this function */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
7189
/* Reset the per-port HW state: mask NIG interrupts, stop traffic into the
 * BRB, mask the AEU attention lines, then verify the BRB drained.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* Give in-flight packets time to drain before checking occupancy */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
7215
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007216static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7217{
7218 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7219 BP_FUNC(bp), reset_code);
7220
7221 switch (reset_code) {
7222 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7223 bnx2x_reset_port(bp);
7224 bnx2x_reset_func(bp);
7225 bnx2x_reset_common(bp);
7226 break;
7227
7228 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7229 bnx2x_reset_port(bp);
7230 bnx2x_reset_func(bp);
7231 break;
7232
7233 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7234 bnx2x_reset_func(bp);
7235 break;
7236
7237 default:
7238 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7239 break;
7240 }
7241}
7242
/* must be called with rtnl_lock */
/* Full NIC teardown: stop rx/tx, invalidate MACs, negotiate the unload
 * scope with the MCP (or account for it locally when there is no MCP),
 * reset the chip and free all driver resources.
 *
 * @unload_mode: UNLOAD_NORMAL disables WoL; otherwise WoL configuration
 *               depends on bp->flags/bp->wol.
 * Returns 0 on success; -EBUSY only when BNX2X_STOP_ON_ERROR is defined
 * and a queue failed to drain or the leading connection failed to stop.
 */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Stop accepting rx traffic in the storms first */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	/* Stop the pulse timer but keep the MCP heartbeat alive */
	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		/* E1: invalidate the unicast MAC and the whole CAM-based
		 * multicast configuration table */
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H: disable the LLH function and clear the MC hash */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	/* Pick the unload request code to send to the MCP based on the
	 * requested mode and WoL capability/configuration */
	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		/* NOTE(review): emac_base appears to be consumed by the
		 * EMAC_WR macro below — confirm against bnx2x.h */
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

	/* NOTE: intentional fallthrough into unload_error on success —
	 * the code below runs for both the success and error paths */
unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		/* No MCP: maintain the shared load counters ourselves and
		 * derive the reset scope from them */
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
7409
/* Workqueue handler that recovers the NIC by a full unload/reload cycle.
 * Runs under rtnl_lock; does nothing if the netdev is no longer running.
 * With BNX2X_STOP_ON_ERROR the reset is suppressed so the error state
 * stays available for a debug dump.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}
7432
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007433/* end of nic load/unload */
7434
7435/* ethtool_ops */
7436
7437/*
7438 * Init service functions
7439 */
7440
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007441static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7442{
7443 switch (func) {
7444 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7445 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7446 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7447 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7448 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7449 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7450 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7451 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7452 default:
7453 BNX2X_ERR("Unsupported function index: %d\n", func);
7454 return (u32)(-1);
7455 }
7456}
7457
/* Disable interrupts on an E1H chip on behalf of function orig_func.
 * Temporarily programs the PGL pretend register to function 0, disables
 * interrupts in that "like-E1" mode, then restores the original function.
 * Each pretend write is verified by a read-back; a mismatch is fatal (BUG).
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
7490
/* Disable chip interrupts during UNDI takeover.  E1 can disable them
 * directly; E1H must go through the pretend-register sequence for the
 * given function.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp))
		bnx2x_int_disable(bp);
	else
		bnx2x_undi_int_disable_e1h(bp, func);
}
7498
/* Probe-time takeover from a preboot UNDI driver.
 *
 * If the chip is unprepared and the doorbell CID offset carries the UNDI
 * signature (0x7), unload UNDI via MCP handshakes on both ports if needed,
 * quiesce input traffic, hard-reset the device while preserving the NIG
 * port-swap straps, and restore our own function/firmware sequence.
 * The whole detection runs under the HW_LOCK_RESOURCE_UNDI lock.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
7597
/* Read chip-wide (non-port) HW info at probe time: chip id, single/dual
 * port detection, flash size, shared-memory base, bootcode version, WoL
 * capability and the board part number.  Sets NO_MCP_FLAG and returns
 * early when the shared-memory base looks invalid (MCP not active).
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* Detect single-port devices from the chip id LSB and the bond
	 * register (0x2874) pattern */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* A shmem base outside [0xA0000, 0xC0000) means no active MCP */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		/* WoL depends on the PME-from-D3cold bit of the PM
		 * capability */
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
7691
/* Build bp->port.supported (ethtool SUPPORTED_* mask) and the PHY address
 * from the NVRAM switch configuration.
 *
 * For a 1G (SerDes) or 10G (XGXS) switch configuration, ORs in the link
 * modes the external PHY type supports, reads the PHY address from the
 * NIG, then masks the result down by the NVRAM speed capability mask.
 * Returns early (leaving bp->port.supported as-is) on an unknown switch
 * or PHY configuration.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* SerDes PHY address comes from the NIG */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* XGXS PHY address comes from the NIG */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
7916
/* Translate the NVRAM-requested link configuration (bp->port.link_config)
 * into the link_params request fields (req_line_speed, req_duplex,
 * req_flow_ctrl) and the ethtool advertising mask (bp->port.advertising).
 *
 * Each requested speed is validated against the capability mask built by
 * bnx2x_link_settings_supported(); on an invalid NVRAM configuration the
 * function logs an error and returns early, leaving the previous request
 * fields in place (except the default case, which falls back to autoneg).
 */
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	/* default duplex; the half-duplex cases below override it */
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			/* advertise everything the port supports */
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			/* BCM8705/8706 external PHYs cannot autonegotiate;
			 * fall back to a forced 10G fibre link instead of
			 * failing the configuration
			 */
			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	/* all three 10G media variants map to the same forced 10G request */
	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		/* unknown speed request: log it, then fall back to autoneg */
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	/* requested flow control comes straight from NVRAM, but AUTO flow
	 * control is meaningless without autoneg support - degrade to NONE
	 */
	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
8079
/* Read this port's hardware configuration from device shared memory
 * (NVRAM-backed): lane/PHY config, speed capabilities, per-lane XGXS
 * equalization values, feature flags, WoL default and the port MAC
 * address.  Then derive the supported/requested link settings via
 * bnx2x_link_settings_supported()/bnx2x_link_settings_requested().
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx; each 32-bit shmem word
	 * packs two 16-bit lane values (high half first)
	 */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* optical module enforcement is a sticky feature flag */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/* MAC address: upper 2 bytes in mac_upper, lower 4 in mac_lower */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008152
/* Gather per-function hardware information: common chip info, E1H
 * multi-function (MF) configuration and outer-VLAN tag, port settings
 * (when an MCP is present), firmware sequence number, and the function
 * MAC address override in MF mode.
 *
 * Returns 0 on success or -EPERM when an E1H VN has no valid E1HOV tag.
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	/* assume single-function until a valid E1HOV tag proves otherwise */
	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("single function mode\n");
			/* a non-zero VN without an E1HOV tag is an invalid
			 * configuration - refuse to bring the function up
			 */
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	/* in MF mode a per-function MAC may override the port MAC */
	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
8223
/* One-time driver-private initialization during probe: set up locks and
 * work items, read hardware info, sanitize module parameters (multi-queue
 * mode, TPA/LRO, MRRS) and initialize default ring sizes, coalescing
 * values and the periodic timer.
 *
 * Returns the status of bnx2x_get_hwinfo() (0 on success).
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode: RSS requires MSI-X, so force it off when
	 * the requested interrupt mode is INTx or MSI
	 */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	/* PCIe maximum read request size from module parameter */
	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* default interrupt coalescing values (usec) */
	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	/* slow (emulation/FPGA) chips get a 5x longer timer period;
	 * the "poll" module parameter overrides the interval entirely
	 */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
8290
8291/*
8292 * ethtool service functions
8293 */
8294
8295/* All ethtool functions called with rtnl_lock */
8296
/* ethtool get_settings: report current (or requested, when the carrier
 * is down) link speed/duplex, the supported/advertised masks, and the
 * physical port type derived from the external PHY.  In E1H MF mode the
 * reported speed is capped by this VN's maximum bandwidth share.
 * Always returns 0.
 */
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		/* link up: report the negotiated values */
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		/* link down: report what was requested */
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		/* MF bandwidth config is in units of 100 Mbps */
		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	/* map the external PHY type to an ethtool port type */
	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	/* packet coalescing limits are not used by this driver */
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
8373
/* ethtool set_settings: validate the requested autoneg/speed/duplex
 * against the port's capability mask and store them in link_params and
 * the advertising mask; if the interface is running, restart the link
 * with the new settings.
 *
 * A silent no-op (returns 0) in E1H multi-function mode, where the link
 * is owned by the port, not this function.  Returns -EINVAL for any
 * unsupported combination.
 */
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		/* 1G and above support full duplex only */
		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		/* all checks passed - commit the forced settings */
		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	/* apply immediately when the interface is up */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
8524
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008525#define PHY_FW_VER_LEN 10
8526
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008527static void bnx2x_get_drvinfo(struct net_device *dev,
8528 struct ethtool_drvinfo *info)
8529{
8530 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinf0e53a82008-08-13 15:58:30 -07008531 u8 phy_fw_ver[PHY_FW_VER_LEN];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008532
8533 strcpy(info->driver, DRV_MODULE_NAME);
8534 strcpy(info->version, DRV_MODULE_VERSION);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008535
8536 phy_fw_ver[0] = '\0';
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008537 if (bp->port.pmf) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07008538 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008539 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8540 (bp->state != BNX2X_STATE_CLOSED),
8541 phy_fw_ver, PHY_FW_VER_LEN);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07008542 bnx2x_release_phy_lock(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008543 }
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008544
Eilon Greensteinf0e53a82008-08-13 15:58:30 -07008545 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8546 (bp->common.bc_ver & 0xff0000) >> 16,
8547 (bp->common.bc_ver & 0xff00) >> 8,
8548 (bp->common.bc_ver & 0xff),
8549 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008550 strcpy(info->bus_info, pci_name(bp->pdev));
8551 info->n_stats = BNX2X_NUM_STATS;
8552 info->testinfo_len = BNX2X_NUM_TESTS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008553 info->eedump_len = bp->common.flash_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008554 info->regdump_len = 0;
8555}
8556
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00008557#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8558#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8559
8560static int bnx2x_get_regs_len(struct net_device *dev)
8561{
8562 static u32 regdump_len;
8563 struct bnx2x *bp = netdev_priv(dev);
8564 int i;
8565
8566 if (regdump_len)
8567 return regdump_len;
8568
8569 if (CHIP_IS_E1(bp)) {
8570 for (i = 0; i < REGS_COUNT; i++)
8571 if (IS_E1_ONLINE(reg_addrs[i].info))
8572 regdump_len += reg_addrs[i].size;
8573
8574 for (i = 0; i < WREGS_COUNT_E1; i++)
8575 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8576 regdump_len += wreg_addrs_e1[i].size *
8577 (1 + wreg_addrs_e1[i].read_regs_count);
8578
8579 } else { /* E1H */
8580 for (i = 0; i < REGS_COUNT; i++)
8581 if (IS_E1H_ONLINE(reg_addrs[i].info))
8582 regdump_len += reg_addrs[i].size;
8583
8584 for (i = 0; i < WREGS_COUNT_E1H; i++)
8585 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8586 regdump_len += wreg_addrs_e1h[i].size *
8587 (1 + wreg_addrs_e1h[i].read_regs_count);
8588 }
8589 regdump_len *= 4;
8590 regdump_len += sizeof(struct dump_hdr);
8591
8592 return regdump_len;
8593}
8594
/* ethtool .get_regs: fill the user buffer with a register dump.
 * Layout: struct dump_hdr followed by raw 32-bit reads of every register
 * marked online for this chip revision.  Only valid while the NIC is up;
 * otherwise the (pre-zeroed) buffer is returned as-is.
 */
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	/* registers are only safely readable while the device is running */
	if (!netif_running(bp->dev))
		return;

	/* hdr_size is in dwords, excluding the size field itself */
	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	/* dump every online register, one dword at a time */
	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
8634
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008635static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8636{
8637 struct bnx2x *bp = netdev_priv(dev);
8638
8639 if (bp->flags & NO_WOL_FLAG) {
8640 wol->supported = 0;
8641 wol->wolopts = 0;
8642 } else {
8643 wol->supported = WAKE_MAGIC;
8644 if (bp->wol)
8645 wol->wolopts = WAKE_MAGIC;
8646 else
8647 wol->wolopts = 0;
8648 }
8649 memset(&wol->sopass, 0, sizeof(wol->sopass));
8650}
8651
8652static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8653{
8654 struct bnx2x *bp = netdev_priv(dev);
8655
8656 if (wol->wolopts & ~WAKE_MAGIC)
8657 return -EINVAL;
8658
8659 if (wol->wolopts & WAKE_MAGIC) {
8660 if (bp->flags & NO_WOL_FLAG)
8661 return -EINVAL;
8662
8663 bp->wol = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008664 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008665 bp->wol = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008666
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008667 return 0;
8668}
8669
8670static u32 bnx2x_get_msglevel(struct net_device *dev)
8671{
8672 struct bnx2x *bp = netdev_priv(dev);
8673
8674 return bp->msglevel;
8675}
8676
8677static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8678{
8679 struct bnx2x *bp = netdev_priv(dev);
8680
8681 if (capable(CAP_NET_ADMIN))
8682 bp->msglevel = level;
8683}
8684
8685static int bnx2x_nway_reset(struct net_device *dev)
8686{
8687 struct bnx2x *bp = netdev_priv(dev);
8688
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008689 if (!bp->port.pmf)
8690 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008691
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008692 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008693 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008694 bnx2x_link_set(bp);
8695 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008696
8697 return 0;
8698}
8699
Naohiro Ooiwa01e53292009-06-30 12:44:19 -07008700static u32
8701bnx2x_get_link(struct net_device *dev)
8702{
8703 struct bnx2x *bp = netdev_priv(dev);
8704
8705 return bp->link_vars.link_up;
8706}
8707
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008708static int bnx2x_get_eeprom_len(struct net_device *dev)
8709{
8710 struct bnx2x *bp = netdev_priv(dev);
8711
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008712 return bp->common.flash_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008713}
8714
/* Acquire this port's slot in the NVRAM software arbitration.
 * Requests the per-port arbiter bit and polls until the MCP grants it.
 * Returns 0 on success, -EBUSY if the grant never arrives within the
 * timeout (scaled x100 on slow emulation/FPGA platforms).
 */
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	/* poll for the grant bit, 5us per iteration */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
8745
/* Release this port's NVRAM software-arbitration slot.
 * Clears the per-port request bit and polls until the grant bit drops.
 * Returns 0 on success, -EBUSY if the arbiter never releases within the
 * timeout (scaled x100 on slow emulation/FPGA platforms).
 */
static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	/* poll until the grant bit is gone, 5us per iteration */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
8776
8777static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8778{
8779 u32 val;
8780
8781 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8782
8783 /* enable both bits, even on read */
8784 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8785 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8786 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8787}
8788
8789static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8790{
8791 u32 val;
8792
8793 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8794
8795 /* disable both bits, even after read */
8796 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8797 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8798 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8799}
8800
/* Issue a single dword read command to the NVRAM controller.
 * @offset:    byte offset in flash (masked to the controller's range)
 * @ret_val:   receives the dword, converted to big-endian so the byte
 *             order matches what ethtool expects to see
 * @cmd_flags: MCPR_NVM_COMMAND_FIRST/LAST sequencing flags from caller
 * Returns 0 on completion, -EBUSY if DONE never asserts within the
 * timeout (scaled x100 on slow emulation/FPGA platforms).
 * Caller must already hold the NVRAM lock and have access enabled.
 */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
8845
/* Read buf_size bytes from NVRAM at offset into ret_buf.
 * offset and buf_size must be dword-aligned and in range; otherwise
 * -EINVAL.  Takes the NVRAM lock, enables access, streams dwords with
 * FIRST on the initial command and LAST on the final one, then tears
 * everything down.  Returns 0 or the first error seen.
 */
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s); the final dword is handled below so it
	 * can carry the LAST flag */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	/* last dword: keeps FIRST too when the buffer was a single dword */
	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
8900
8901static int bnx2x_get_eeprom(struct net_device *dev,
8902 struct ethtool_eeprom *eeprom, u8 *eebuf)
8903{
8904 struct bnx2x *bp = netdev_priv(dev);
8905 int rc;
8906
Eilon Greenstein2add3ac2009-01-14 06:44:07 +00008907 if (!netif_running(dev))
8908 return -EAGAIN;
8909
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008910 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008911 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8912 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8913 eeprom->len, eeprom->len);
8914
8915 /* parameters already validated in ethtool_get_eeprom */
8916
8917 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8918
8919 return rc;
8920}
8921
/* Issue a single dword write command to the NVRAM controller.
 * @val:       dword to program (caller is responsible for byte order)
 * @cmd_flags: MCPR_NVM_COMMAND_FIRST/LAST sequencing flags from caller
 * Returns 0 on completion, -EBUSY if DONE never asserts within the
 * timeout (scaled x100 on slow emulation/FPGA platforms).
 * Caller must already hold the NVRAM lock and have access enabled.
 */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion; val is reused to poll the command register */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
8961
Eliezer Tamirf1410642008-02-28 11:51:50 -08008962#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008963
/* Write a single byte to NVRAM by read-modify-writing the containing
 * dword: read the aligned dword, patch one byte, write it back as a
 * FIRST|LAST single-command sequence.  Used for ethtool's 1-byte writes.
 * Returns 0 on success or a negative errno.
 */
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		/* NOTE(review): the mask/shift below operate on the raw
		 * __be32 image using a CPU-order byte offset, relying on
		 * the be32_to_cpu() that follows; presumably correct on
		 * little-endian hosts -- confirm behavior on big-endian. */
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9009
/* Write buf_size bytes from data_buf to NVRAM at offset.
 * A 1-byte request (ethtool) is delegated to bnx2x_nvram_write1();
 * otherwise offset and buf_size must be dword-aligned and in range.
 * Streams dwords with FIRST/LAST flags re-asserted at NVRAM page
 * boundaries.  Returns 0 or the first error seen.
 */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* LAST on the final dword or at the end of a flash page;
		 * FIRST again at the start of each new page */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9070
/* ethtool .set_eeprom: write NVRAM, or — when the magic is the PHY
 * signature 0x00504859 ("PHY") — flash the external PHY firmware
 * instead.  PHY flashing is restricted to the port-management function
 * (PMF) and is followed by a link reset/re-init so the new firmware
 * takes effect.  Requires the interface to be up (-EAGAIN otherwise).
 */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			/* hold the PHY lock across flash + link restart */
			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				/* bounce the link so the new PHY FW runs;
				 * rc accumulates any failure */
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars, 1);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
9112
9113static int bnx2x_get_coalesce(struct net_device *dev,
9114 struct ethtool_coalesce *coal)
9115{
9116 struct bnx2x *bp = netdev_priv(dev);
9117
9118 memset(coal, 0, sizeof(struct ethtool_coalesce));
9119
9120 coal->rx_coalesce_usecs = bp->rx_ticks;
9121 coal->tx_coalesce_usecs = bp->tx_ticks;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009122
9123 return 0;
9124}
9125
9126static int bnx2x_set_coalesce(struct net_device *dev,
9127 struct ethtool_coalesce *coal)
9128{
9129 struct bnx2x *bp = netdev_priv(dev);
9130
9131 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
Eilon Greenstein1e9d9982009-07-05 04:18:14 +00009132 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9133 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009134
9135 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
Eilon Greenstein1e9d9982009-07-05 04:18:14 +00009136 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9137 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009138
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009139 if (netif_running(dev))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009140 bnx2x_update_coalesce(bp);
9141
9142 return 0;
9143}
9144
9145static void bnx2x_get_ringparam(struct net_device *dev,
9146 struct ethtool_ringparam *ering)
9147{
9148 struct bnx2x *bp = netdev_priv(dev);
9149
9150 ering->rx_max_pending = MAX_RX_AVAIL;
9151 ering->rx_mini_max_pending = 0;
9152 ering->rx_jumbo_max_pending = 0;
9153
9154 ering->rx_pending = bp->rx_ring_size;
9155 ering->rx_mini_pending = 0;
9156 ering->rx_jumbo_pending = 0;
9157
9158 ering->tx_max_pending = MAX_TX_AVAIL;
9159 ering->tx_pending = bp->tx_ring_size;
9160}
9161
9162static int bnx2x_set_ringparam(struct net_device *dev,
9163 struct ethtool_ringparam *ering)
9164{
9165 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009166 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009167
9168 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9169 (ering->tx_pending > MAX_TX_AVAIL) ||
9170 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9171 return -EINVAL;
9172
9173 bp->rx_ring_size = ering->rx_pending;
9174 bp->tx_ring_size = ering->tx_pending;
9175
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009176 if (netif_running(dev)) {
9177 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9178 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009179 }
9180
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009181 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009182}
9183
9184static void bnx2x_get_pauseparam(struct net_device *dev,
9185 struct ethtool_pauseparam *epause)
9186{
9187 struct bnx2x *bp = netdev_priv(dev);
9188
Eilon Greenstein356e2382009-02-12 08:38:32 +00009189 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9190 BNX2X_FLOW_CTRL_AUTO) &&
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009191 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9192
David S. Millerc0700f92008-12-16 23:53:20 -08009193 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9194 BNX2X_FLOW_CTRL_RX);
9195 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9196 BNX2X_FLOW_CTRL_TX);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009197
9198 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9199 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9200 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9201}
9202
/* ethtool .set_pauseparam: configure Rx/Tx pause and flow-control
 * autoneg, then restart the link if the interface is running.
 * No-op (success) in E1H multi-function mode, where a single function
 * does not own the port's flow control.
 */
static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	/* start from AUTO, then OR in the requested directions */
	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	/* still AUTO means neither direction was requested -> NONE */
	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		/* flow-ctrl autoneg only applies with auto line speed */
		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		/* stop statistics before reconfiguring the link */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
9246
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009247static int bnx2x_set_flags(struct net_device *dev, u32 data)
9248{
9249 struct bnx2x *bp = netdev_priv(dev);
9250 int changed = 0;
9251 int rc = 0;
9252
9253 /* TPA requires Rx CSUM offloading */
9254 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9255 if (!(dev->features & NETIF_F_LRO)) {
9256 dev->features |= NETIF_F_LRO;
9257 bp->flags |= TPA_ENABLE_FLAG;
9258 changed = 1;
9259 }
9260
9261 } else if (dev->features & NETIF_F_LRO) {
9262 dev->features &= ~NETIF_F_LRO;
9263 bp->flags &= ~TPA_ENABLE_FLAG;
9264 changed = 1;
9265 }
9266
9267 if (changed && netif_running(dev)) {
9268 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9269 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9270 }
9271
9272 return rc;
9273}
9274
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009275static u32 bnx2x_get_rx_csum(struct net_device *dev)
9276{
9277 struct bnx2x *bp = netdev_priv(dev);
9278
9279 return bp->rx_csum;
9280}
9281
9282static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9283{
9284 struct bnx2x *bp = netdev_priv(dev);
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009285 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009286
9287 bp->rx_csum = data;
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009288
9289 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9290 TPA'ed packets will be discarded due to wrong TCP CSUM */
9291 if (!data) {
9292 u32 flags = ethtool_op_get_flags(dev);
9293
9294 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9295 }
9296
9297 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009298}
9299
9300static int bnx2x_set_tso(struct net_device *dev, u32 data)
9301{
Eilon Greenstein755735e2008-06-23 20:35:13 -07009302 if (data) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009303 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735e2008-06-23 20:35:13 -07009304 dev->features |= NETIF_F_TSO6;
9305 } else {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009306 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735e2008-06-23 20:35:13 -07009307 dev->features &= ~NETIF_F_TSO6;
9308 }
9309
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009310 return 0;
9311}
9312
/* Self-test names reported to ethtool (ETH_SS_TEST strings).
 * NOTE(review): order presumably matches the order results are filled in
 * by the self-test routine -- verify before reordering. */
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
9324
/* ethtool .self_test_count: number of self-test results reported. */
static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}
9329
/* Self-test: verify a sample of writable registers.
 * For each table entry, write 0x00000000 and then 0xffffffff, read the
 * value back, restore the original, and check that the bits covered by
 * the mask read back as written.  offset1 is the per-port stride added
 * offset0.  Returns 0 on success, -ENODEV on the first mismatch or if
 * the interface is down.
 */
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
9423
/* Self-test: exercise internal memories and check parity status.
 * First reads every word of the memories in mem_tbl, then checks that
 * the parity-status registers show no unexpected bits (expected bits
 * are masked out per chip: e1_mask for E1, e1h_mask for E1H).
 * Returns 0 on success, -ENODEV on a latched parity error or if the
 * interface is down.
 */
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
9482
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -07009483static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9484{
9485 int cnt = 1000;
9486
9487 if (link_up)
9488 while (bnx2x_link_test(bp) && cnt--)
9489 msleep(10);
9490}
9491
/* Run a single loopback iteration in the given mode (BNX2X_PHY_LOOPBACK
 * or BNX2X_MAC_LOOPBACK): build a test frame, transmit it on queue 0 and
 * verify it comes back intact on the RX side of the same queue.
 * Must be called with the interface quiesced (see bnx2x_test_loopback).
 * Returns 0 if the frame made the round trip unmodified, -EINVAL for a
 * bad/unprepared mode, -ENOMEM on skb allocation failure, -ENODEV on any
 * TX/RX mismatch.  Always resets link_params.loopback_mode on exit.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		/* PHY loopback relies on the diag-mode load having already
		 * configured LOOPBACK_XGXS_10; bail out otherwise */
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		/* MAC loopback is set up here by re-initializing the PHY
		 * with the BMAC loopback mode */
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet: our MAC as destination, zeroed
	 * rest-of-header, then a recognizable (i & 0xff) byte pattern */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet: snapshot the TX/RX consumer indices
	 * first so we can detect exactly one completion of each */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	/* fill a single START+END buffer descriptor for the whole frame */
	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* make sure the BD is written before the producer updates */
	wmb();

	le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
	DOORBELL(bp, fp->index, 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	/* give the chip time to loop the frame back */
	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* the received CQE must be a fast-path completion with no errors */
	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* verify the payload byte pattern survived the round trip */
	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* consume the RX descriptor/completion we just inspected */
	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
9618
9619static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9620{
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00009621 int rc = 0, res;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -07009622
9623 if (!netif_running(bp->dev))
9624 return BNX2X_LOOPBACK_FAILED;
9625
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009626 bnx2x_netif_stop(bp, 1);
Eilon Greenstein3910c8a2009-01-22 06:01:32 +00009627 bnx2x_acquire_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -07009628
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00009629 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9630 if (res) {
9631 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9632 rc |= BNX2X_PHY_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -07009633 }
9634
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00009635 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9636 if (res) {
9637 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9638 rc |= BNX2X_MAC_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -07009639 }
9640
Eilon Greenstein3910c8a2009-01-22 06:01:32 +00009641 bnx2x_release_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -07009642 bnx2x_netif_start(bp);
9643
9644 return rc;
9645}
9646
/* little-endian CRC32 residual left when a region's appended CRC is
 * included in the checksum calculation */
#define CRC32_RESIDUAL 0xdebb20e3
9648
9649static int bnx2x_test_nvram(struct bnx2x *bp)
9650{
9651 static const struct {
9652 int offset;
9653 int size;
9654 } nvram_tbl[] = {
9655 { 0, 0x14 }, /* bootstrap */
9656 { 0x14, 0xec }, /* dir */
9657 { 0x100, 0x350 }, /* manuf_info */
9658 { 0x450, 0xf0 }, /* feature_info */
9659 { 0x640, 0x64 }, /* upgrade_key_info */
9660 { 0x6a4, 0x64 },
9661 { 0x708, 0x70 }, /* manuf_key_info */
9662 { 0x778, 0x70 },
9663 { 0, 0 }
9664 };
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00009665 __be32 buf[0x350 / 4];
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -07009666 u8 *data = (u8 *)buf;
9667 int i, rc;
9668 u32 magic, csum;
9669
9670 rc = bnx2x_nvram_read(bp, 0, data, 4);
9671 if (rc) {
Eilon Greensteinf5372252009-02-12 08:38:30 +00009672 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -07009673 goto test_nvram_exit;
9674 }
9675
9676 magic = be32_to_cpu(buf[0]);
9677 if (magic != 0x669955aa) {
9678 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9679 rc = -ENODEV;
9680 goto test_nvram_exit;
9681 }
9682
9683 for (i = 0; nvram_tbl[i].size; i++) {
9684
9685 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9686 nvram_tbl[i].size);
9687 if (rc) {
9688 DP(NETIF_MSG_PROBE,
Eilon Greensteinf5372252009-02-12 08:38:30 +00009689 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -07009690 goto test_nvram_exit;
9691 }
9692
9693 csum = ether_crc_le(nvram_tbl[i].size, data);
9694 if (csum != CRC32_RESIDUAL) {
9695 DP(NETIF_MSG_PROBE,
9696 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9697 rc = -ENODEV;
9698 goto test_nvram_exit;
9699 }
9700 }
9701
9702test_nvram_exit:
9703 return rc;
9704}
9705
/* Interrupt self test: post a zero-length SET_MAC ramrod on the slowpath
 * (nothing is actually programmed) and wait up to ~100ms for its
 * completion, which presumably arrives via the slowpath interrupt and
 * clears set_mac_pending — TODO confirm against the completion handler.
 * Returns 0 on success, -ENODEV if the interface is down or the
 * completion never arrives, or the bnx2x_sp_post() error code.
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* empty command header: length 0, per-port/function offset */
	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		/* poll the pending flag: up to 10 tries, 10ms apart;
		 * i == 10 after the loop means the flag never cleared
		 * before the final sleep expired */
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
9738
/* ethtool self-test entry point.  Result layout in buf[]:
 * 0 - registers, 1 - memory, 2 - loopback (bitmask), 3 - nvram,
 * 4 - interrupt, 5 - link.
 * The offline tests (0-2) reload the NIC in diagnostic mode and are
 * not supported in E1H multi-function mode.
 */
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		/* reload the NIC in diagnostic mode for the offline tests */
		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* buf[2] carries the loopback failure bitmask directly */
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		/* bring the NIC back to normal operation */
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* the link test is only meaningful on the port management function */
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
9808
/* Per-queue ethtool statistics descriptors: offset (in u32 units, via
 * Q_STATS_OFFSET32) into the per-queue eth_q_stats structure, counter
 * width in bytes (4 or 8; 0 would mean "skip"), and a printf-style name
 * template whose "%d" is replaced with the queue number.
 */
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					 4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					 4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};
9834
/* Device-wide ethtool statistics descriptors: offset (in u32 units, via
 * STATS_OFFSET32) into bp->eth_stats, counter width in bytes (4 or 8),
 * scope flags (per-port, per-function, or both - used to filter the
 * list in E1H multi-function mode), and the reported name.
 */
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
9928
/* a stat is port-wide iff only the PORT scope flag is set */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
/* in E1H multi-function mode only per-function stats are exposed,
 * unless stats debugging is enabled via the msglevel */
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
Yitchak Gertner66e855f2008-08-13 15:49:05 -07009934
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009935static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9936{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009937 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +00009938 int i, j, k;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009939
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009940 switch (stringset) {
9941 case ETH_SS_STATS:
Eilon Greensteinde832a52009-02-12 08:36:33 +00009942 if (is_multi(bp)) {
9943 k = 0;
9944 for_each_queue(bp, i) {
9945 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9946 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9947 bnx2x_q_stats_arr[j].string, i);
9948 k += BNX2X_NUM_Q_STATS;
9949 }
9950 if (IS_E1HMF_MODE_STAT(bp))
9951 break;
9952 for (j = 0; j < BNX2X_NUM_STATS; j++)
9953 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9954 bnx2x_stats_arr[j].string);
9955 } else {
9956 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9957 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9958 continue;
9959 strcpy(buf + j*ETH_GSTRING_LEN,
9960 bnx2x_stats_arr[i].string);
9961 j++;
9962 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009963 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009964 break;
9965
9966 case ETH_SS_TEST:
9967 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9968 break;
9969 }
9970}
9971
9972static int bnx2x_get_stats_count(struct net_device *dev)
9973{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009974 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +00009975 int i, num_stats;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009976
Eilon Greensteinde832a52009-02-12 08:36:33 +00009977 if (is_multi(bp)) {
9978 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9979 if (!IS_E1HMF_MODE_STAT(bp))
9980 num_stats += BNX2X_NUM_STATS;
9981 } else {
9982 if (IS_E1HMF_MODE_STAT(bp)) {
9983 num_stats = 0;
9984 for (i = 0; i < BNX2X_NUM_STATS; i++)
9985 if (IS_FUNC_STAT(i))
9986 num_stats++;
9987 } else
9988 num_stats = BNX2X_NUM_STATS;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009989 }
Eilon Greensteinde832a52009-02-12 08:36:33 +00009990
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009991 return num_stats;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009992}
9993
/* ethtool get_ethtool_stats handler: copy the counters described by
 * bnx2x_q_stats_arr/bnx2x_stats_arr into buf[] as u64 values, in the
 * same order as bnx2x_get_strings() emits the names.  Entries with
 * size 0 are reported as 0; size-4 entries are read as one u32;
 * size-8 entries combine two consecutive u32 words via HILO_U64.
 */
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		/* per-queue counters first; k tracks the output position */
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		/* in E1H MF mode the device-wide counters are hidden */
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		/* single queue: device-wide counters only; j tracks the
		 * output position since port-only stats may be skipped */
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
10065
10066static int bnx2x_phys_id(struct net_device *dev, u32 data)
10067{
10068 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010069 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010070 int i;
10071
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010072 if (!netif_running(dev))
10073 return 0;
10074
10075 if (!bp->port.pmf)
10076 return 0;
10077
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010078 if (data == 0)
10079 data = 2;
10080
10081 for (i = 0; i < (data * 2); i++) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010082 if ((i % 2) == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010083 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010084 bp->link_params.hw_led_mode,
10085 bp->link_params.chip_id);
10086 else
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010087 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010088 bp->link_params.hw_led_mode,
10089 bp->link_params.chip_id);
10090
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010091 msleep_interruptible(500);
10092 if (signal_pending(current))
10093 break;
10094 }
10095
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010096 if (bp->link_vars.link_up)
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010097 bnx2x_set_led(bp, port, LED_MODE_OPER,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010098 bp->link_vars.line_speed,
10099 bp->link_params.hw_led_mode,
10100 bp->link_params.chip_id);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010101
10102 return 0;
10103}
10104
/* ethtool operations table for the bnx2x driver */
static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};
10143
10144/* end of ethtool_ops */
10145
10146/****************************************************************************
10147* General service functions
10148****************************************************************************/
10149
/* Move the device between PCI power states D0 and D3hot by rewriting
 * the PMCSR register.  For D3hot, PME is armed when WoL is enabled.
 * Returns 0 on success, -EINVAL for any other requested state.
 */
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		/* clear the power-state field (-> D0) and write-1-to-clear
		 * any pending PME status */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* power-state field value 3 == D3hot */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
10187
/* Return non-zero when the fastpath RX completion queue has unprocessed
 * entries, i.e. the status-block consumer index differs from our local
 * rx_comp_cons.  The index at the page boundary (rx_cons_sb &
 * MAX_RCQ_DESC_CNT == MAX_RCQ_DESC_CNT) is stepped over — presumably
 * the last RCQ element of a page is a next-page link, not a completion;
 * confirm against the RCQ ring setup.
 */
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
10199
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010200/*
10201 * net_device service functions
10202 */
10203
/* NAPI poll handler for one fastpath (Rx/Tx ring pair).
 *
 * Services Tx completions, then Rx completions up to @budget, and only
 * re-enables the fastpath interrupt (IGU_INT_ENABLE ack) when no work
 * remains. Returns the number of Rx packets processed (<= budget).
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	/* on panic: complete NAPI without acking/re-enabling interrupts */
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	/* warm the cache lines the Tx/Rx handlers will touch first */
	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	/* latch the latest status block indices into fp */
	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* BNX2X_HAS_WORK() reads the status block, thus we need to
	 * ensure that status block indices have been actually read
	 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
	 * so that we won't write the "newer" value of the status block to IGU
	 * (if there was a DMA right after BNX2X_HAS_WORK and
	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
	 * may be postponed to right before bnx2x_ack_sb). In this case
	 * there will never be another interrupt until there is another update
	 * of the status block, while there is still unhandled work.
	 */
	rmb();

	if (!BNX2X_HAS_WORK(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		/* ack both storm indices; the second ack re-enables the
		 * fastpath interrupt in the IGU
		 */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}
10260
Eilon Greenstein755735e2008-06-23 20:35:13 -070010261
10262/* we split the first BD into headers and data BDs
Eilon Greenstein33471622008-08-13 15:59:08 -070010263 * to ease the pain of our fellow microcode engineers
Eilon Greenstein755735e2008-06-23 20:35:13 -070010264 * we use one mapping for both BDs
10265 * So far this has only been observed to happen
10266 * in Other Operating Systems(TM)
10267 */
10268static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10269 struct bnx2x_fastpath *fp,
10270 struct eth_tx_bd **tx_bd, u16 hlen,
10271 u16 bd_prod, int nbd)
10272{
10273 struct eth_tx_bd *h_tx_bd = *tx_bd;
10274 struct eth_tx_bd *d_tx_bd;
10275 dma_addr_t mapping;
10276 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10277
10278 /* first fix first BD */
10279 h_tx_bd->nbd = cpu_to_le16(nbd);
10280 h_tx_bd->nbytes = cpu_to_le16(hlen);
10281
10282 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10283 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10284 h_tx_bd->addr_lo, h_tx_bd->nbd);
10285
10286 /* now get a new data BD
10287 * (after the pbd) and fill it */
10288 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10289 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10290
10291 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10292 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10293
10294 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10295 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10296 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10297 d_tx_bd->vlan = 0;
10298 /* this marks the BD as one that has no individual mapping
10299 * the FW ignores this flag in a BD not marked start
10300 */
10301 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10302 DP(NETIF_MSG_TX_QUEUED,
10303 "TSO split data size is %d (%x:%x)\n",
10304 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10305
10306 /* update tx_bd for marking the last BD flag */
10307 *tx_bd = d_tx_bd;
10308
10309 return bd_prod;
10310}
10311
/* Adjust a 16-bit checksum so it covers exactly the region starting at
 * t_header: a positive 'fix' subtracts the checksum of the 'fix' bytes
 * immediately before t_header; a negative 'fix' adds the checksum of
 * the first '-fix' bytes at t_header. Result is returned byte-swapped.
 */
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	/* remove the contribution of the bytes preceding t_header */
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	/* add in the bytes at t_header that were not yet summed */
	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
10324
10325static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10326{
10327 u32 rc;
10328
10329 if (skb->ip_summed != CHECKSUM_PARTIAL)
10330 rc = XMIT_PLAIN;
10331
10332 else {
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010333 if (skb->protocol == htons(ETH_P_IPV6)) {
Eilon Greenstein755735e2008-06-23 20:35:13 -070010334 rc = XMIT_CSUM_V6;
10335 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10336 rc |= XMIT_CSUM_TCP;
10337
10338 } else {
10339 rc = XMIT_CSUM_V4;
10340 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10341 rc |= XMIT_CSUM_TCP;
10342 }
10343 }
10344
10345 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10346 rc |= XMIT_GSO_V4;
10347
10348 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10349 rc |= XMIT_GSO_V6;
10350
10351 return rc;
10352}
10353
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				/* a window summing to less than one MSS
				 * violates the FW restriction -> linearize
				 */
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows (sliding one frag at a time:
			   add the frag entering the window, subtract the
			   one leaving it) */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
Eilon Greenstein755735e2008-06-23 20:35:13 -070010434
10435/* called with netif_tx_lock
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010436 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
Eilon Greenstein755735e2008-06-23 20:35:13 -070010437 * netif_wake_queue()
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010438 */
10439static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10440{
10441 struct bnx2x *bp = netdev_priv(dev);
10442 struct bnx2x_fastpath *fp;
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010443 struct netdev_queue *txq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010444 struct sw_tx_bd *tx_buf;
10445 struct eth_tx_bd *tx_bd;
10446 struct eth_tx_parse_bd *pbd = NULL;
10447 u16 pkt_prod, bd_prod;
Eilon Greenstein755735e2008-06-23 20:35:13 -070010448 int nbd, fp_index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010449 dma_addr_t mapping;
Eilon Greenstein755735e2008-06-23 20:35:13 -070010450 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10451 int vlan_off = (bp->e1hov ? 4 : 0);
10452 int i;
10453 u8 hlen = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010454
10455#ifdef BNX2X_STOP_ON_ERROR
10456 if (unlikely(bp->panic))
10457 return NETDEV_TX_BUSY;
10458#endif
10459
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010460 fp_index = skb_get_queue_mapping(skb);
10461 txq = netdev_get_tx_queue(dev, fp_index);
10462
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010463 fp = &bp->fp[fp_index];
Eilon Greenstein755735e2008-06-23 20:35:13 -070010464
Yitchak Gertner231fd582008-08-25 15:27:06 -070010465 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
Eilon Greensteinde832a52009-02-12 08:36:33 +000010466 fp->eth_q_stats.driver_xoff++,
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010467 netif_tx_stop_queue(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010468 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10469 return NETDEV_TX_BUSY;
10470 }
10471
Eilon Greenstein755735e2008-06-23 20:35:13 -070010472 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10473 " gso type %x xmit_type %x\n",
10474 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10475 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10476
Eilon Greenstein632da4d2009-01-14 06:44:10 +000010477#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
Eilon Greensteinf5372252009-02-12 08:38:30 +000010478 /* First, check if we need to linearize the skb (due to FW
10479 restrictions). No need to check fragmentation if page size > 8K
10480 (there will be no violation to FW restrictions) */
Eilon Greenstein755735e2008-06-23 20:35:13 -070010481 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10482 /* Statistics of linearization */
10483 bp->lin_cnt++;
10484 if (skb_linearize(skb) != 0) {
10485 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10486 "silently dropping this SKB\n");
10487 dev_kfree_skb_any(skb);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -070010488 return NETDEV_TX_OK;
Eilon Greenstein755735e2008-06-23 20:35:13 -070010489 }
10490 }
Eilon Greenstein632da4d2009-01-14 06:44:10 +000010491#endif
Eilon Greenstein755735e2008-06-23 20:35:13 -070010492
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010493 /*
Eilon Greenstein755735e2008-06-23 20:35:13 -070010494 Please read carefully. First we use one BD which we mark as start,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010495 then for TSO or xsum we have a parsing info BD,
Eilon Greenstein755735e2008-06-23 20:35:13 -070010496 and only then we have the rest of the TSO BDs.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010497 (don't forget to mark the last one as last,
10498 and to unmap only AFTER you write to the BD ...)
Eilon Greenstein755735e2008-06-23 20:35:13 -070010499 And above all, all pdb sizes are in words - NOT DWORDS!
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010500 */
10501
10502 pkt_prod = fp->tx_pkt_prod++;
Eilon Greenstein755735e2008-06-23 20:35:13 -070010503 bd_prod = TX_BD(fp->tx_bd_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010504
Eilon Greenstein755735e2008-06-23 20:35:13 -070010505 /* get a tx_buf and first BD */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010506 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10507 tx_bd = &fp->tx_desc_ring[bd_prod];
10508
10509 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10510 tx_bd->general_data = (UNICAST_ADDRESS <<
10511 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
Eilon Greenstein3196a882008-08-13 15:58:49 -070010512 /* header nbd */
10513 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010514
Eilon Greenstein755735e2008-06-23 20:35:13 -070010515 /* remember the first BD of the packet */
10516 tx_buf->first_bd = fp->tx_bd_prod;
10517 tx_buf->skb = skb;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010518
10519 DP(NETIF_MSG_TX_QUEUED,
10520 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10521 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10522
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080010523#ifdef BCM_VLAN
10524 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10525 (bp->flags & HW_VLAN_TX_FLAG)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010526 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10527 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
Eilon Greenstein755735e2008-06-23 20:35:13 -070010528 vlan_off += 4;
10529 } else
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080010530#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010531 tx_bd->vlan = cpu_to_le16(pkt_prod);
Eilon Greenstein755735e2008-06-23 20:35:13 -070010532
10533 if (xmit_type) {
Eilon Greenstein755735e2008-06-23 20:35:13 -070010534 /* turn on parsing and get a BD */
10535 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10536 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10537
10538 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10539 }
10540
10541 if (xmit_type & XMIT_CSUM) {
10542 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10543
10544 /* for now NS flag is not used in Linux */
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010545 pbd->global_data =
10546 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10547 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
Eilon Greenstein755735e2008-06-23 20:35:13 -070010548
10549 pbd->ip_hlen = (skb_transport_header(skb) -
10550 skb_network_header(skb)) / 2;
10551
10552 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10553
10554 pbd->total_hlen = cpu_to_le16(hlen);
10555 hlen = hlen*2 - vlan_off;
10556
10557 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10558
10559 if (xmit_type & XMIT_CSUM_V4)
10560 tx_bd->bd_flags.as_bitfield |=
10561 ETH_TX_BD_FLAGS_IP_CSUM;
10562 else
10563 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10564
10565 if (xmit_type & XMIT_CSUM_TCP) {
10566 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10567
10568 } else {
10569 s8 fix = SKB_CS_OFF(skb); /* signed! */
10570
10571 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10572 pbd->cs_offset = fix / 2;
10573
10574 DP(NETIF_MSG_TX_QUEUED,
10575 "hlen %d offset %d fix %d csum before fix %x\n",
10576 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10577 SKB_CS(skb));
10578
10579 /* HW bug: fixup the CSUM */
10580 pbd->tcp_pseudo_csum =
10581 bnx2x_csum_fix(skb_transport_header(skb),
10582 SKB_CS(skb), fix);
10583
10584 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10585 pbd->tcp_pseudo_csum);
10586 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010587 }
10588
10589 mapping = pci_map_single(bp->pdev, skb->data,
Eilon Greenstein755735e2008-06-23 20:35:13 -070010590 skb_headlen(skb), PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010591
10592 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10593 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Eilon Greenstein6378c022008-08-13 15:59:25 -070010594 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010595 tx_bd->nbd = cpu_to_le16(nbd);
10596 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10597
10598 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
Eilon Greenstein755735e2008-06-23 20:35:13 -070010599 " nbytes %d flags %x vlan %x\n",
10600 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10601 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10602 le16_to_cpu(tx_bd->vlan));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010603
Eilon Greenstein755735e2008-06-23 20:35:13 -070010604 if (xmit_type & XMIT_GSO) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010605
10606 DP(NETIF_MSG_TX_QUEUED,
10607 "TSO packet len %d hlen %d total len %d tso size %d\n",
10608 skb->len, hlen, skb_headlen(skb),
10609 skb_shinfo(skb)->gso_size);
10610
10611 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10612
Eilon Greenstein755735e2008-06-23 20:35:13 -070010613 if (unlikely(skb_headlen(skb) > hlen))
10614 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10615 bd_prod, ++nbd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010616
10617 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10618 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
Eilon Greenstein755735e2008-06-23 20:35:13 -070010619 pbd->tcp_flags = pbd_tcp_flags(skb);
10620
10621 if (xmit_type & XMIT_GSO_V4) {
10622 pbd->ip_id = swab16(ip_hdr(skb)->id);
10623 pbd->tcp_pseudo_csum =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010624 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10625 ip_hdr(skb)->daddr,
10626 0, IPPROTO_TCP, 0));
Eilon Greenstein755735e2008-06-23 20:35:13 -070010627
10628 } else
10629 pbd->tcp_pseudo_csum =
10630 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10631 &ipv6_hdr(skb)->daddr,
10632 0, IPPROTO_TCP, 0));
10633
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010634 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10635 }
10636
Eilon Greenstein755735e2008-06-23 20:35:13 -070010637 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10638 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010639
Eilon Greenstein755735e2008-06-23 20:35:13 -070010640 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10641 tx_bd = &fp->tx_desc_ring[bd_prod];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010642
Eilon Greenstein755735e2008-06-23 20:35:13 -070010643 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10644 frag->size, PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010645
Eilon Greenstein755735e2008-06-23 20:35:13 -070010646 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10647 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10648 tx_bd->nbytes = cpu_to_le16(frag->size);
10649 tx_bd->vlan = cpu_to_le16(pkt_prod);
10650 tx_bd->bd_flags.as_bitfield = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010651
Eilon Greenstein755735e2008-06-23 20:35:13 -070010652 DP(NETIF_MSG_TX_QUEUED,
10653 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10654 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10655 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010656 }
10657
Eilon Greenstein755735e2008-06-23 20:35:13 -070010658 /* now at last mark the BD as the last BD */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010659 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10660
10661 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10662 tx_bd, tx_bd->bd_flags.as_bitfield);
10663
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010664 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10665
Eilon Greenstein755735e2008-06-23 20:35:13 -070010666 /* now send a tx doorbell, counting the next BD
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010667 * if the packet contains or ends with it
10668 */
10669 if (TX_BD_POFF(bd_prod) < nbd)
10670 nbd++;
10671
10672 if (pbd)
10673 DP(NETIF_MSG_TX_QUEUED,
10674 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10675 " tcp_flags %x xsum %x seq %u hlen %u\n",
10676 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10677 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
Eilon Greenstein755735e2008-06-23 20:35:13 -070010678 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010679
Eilon Greenstein755735e2008-06-23 20:35:13 -070010680 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010681
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -080010682 /*
10683 * Make sure that the BD data is updated before updating the producer
10684 * since FW might read the BD right after the producer is updated.
10685 * This is only applicable for weak-ordered memory model archs such
10686 * as IA-64. The following barrier is also mandatory since FW will
10687 * assumes packets must have BDs.
10688 */
10689 wmb();
10690
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010691 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010692 mb(); /* FW restriction: must not reorder writing nbd and packets */
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010693 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
Eilon Greenstein0626b892009-02-12 08:38:14 +000010694 DOORBELL(bp, fp->index, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010695
10696 mmiowb();
10697
Eilon Greenstein755735e2008-06-23 20:35:13 -070010698 fp->tx_bd_prod += nbd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010699
10700 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -080010701 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10702 if we put Tx into XOFF state. */
10703 smp_mb();
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010704 netif_tx_stop_queue(txq);
Eilon Greensteinde832a52009-02-12 08:36:33 +000010705 fp->eth_q_stats.driver_xoff++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010706 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010707 netif_tx_wake_queue(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010708 }
10709 fp->tx_pkt++;
10710
10711 return NETDEV_TX_OK;
10712}
10713
/* called with rtnl_lock
 *
 * net_device open entry point: start with the carrier marked off,
 * bring the chip to full power (D0), then load the NIC.
 * Returns 0 or a negative errno from bnx2x_nic_load().
 */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
10725
/* called with rtnl_lock
 *
 * net_device stop entry point: unload the NIC and, when this is the
 * last enable of the PCI device, drop to D3hot (skipped for slow chip
 * revisions).
 */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
10739
/* called with netif_tx_lock from dev_mcast.c */
/* Program the Rx filtering mode (promisc / all-multi / selected
 * multicasts). On E1 chips the multicast list is written into CAM
 * entries via a SET_MAC ramrod; on E1H a 256-bit hash filter is
 * written to the MC_HASH registers.
 */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* filters are applied through slowpath commands - the device
	 * must be fully up
	 */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			/* one CAM entry per multicast address, MAC split
			 * into three 16-bit words
			 */
			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			/* invalidate leftover entries from a previously
			 * longer list
			 */
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
					    config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			/* post the SET_MAC ramrod carrying the CAM table */
			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				/* set the filter bit indexed by the top
				 * byte of the CRC32c of the address
				 */
				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
10859
10860/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010861static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10862{
10863 struct sockaddr *addr = p;
10864 struct bnx2x *bp = netdev_priv(dev);
10865
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010866 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010867 return -EINVAL;
10868
10869 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010870 if (netif_running(dev)) {
10871 if (CHIP_IS_E1(bp))
Yitchak Gertner3101c2b2008-08-13 15:52:28 -070010872 bnx2x_set_mac_addr_e1(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010873 else
Yitchak Gertner3101c2b2008-08-13 15:52:28 -070010874 bnx2x_set_mac_addr_e1h(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010875 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010876
10877 return 0;
10878}
10879
/* called with rtnl_lock
 *
 * MII ioctl handler. SIOCGMIIPHY reports the PHY address and then
 * deliberately falls through to SIOCGMIIREG, which reads a PHY
 * register; SIOCSMIIREG (CAP_NET_ADMIN only) writes one. Register
 * accesses are serialized by port.phy_mutex and use only the low
 * 5 bits of reg_num.
 */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}
10930
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010931/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010932static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10933{
10934 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010935 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010936
10937 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10938 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10939 return -EINVAL;
10940
10941 /* This does not race with packet allocation
Eliezer Tamirc14423f2008-02-28 11:49:42 -080010942 * because the actual alloc size is
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010943 * only updated as part of load
10944 */
10945 dev->mtu = new_mtu;
10946
10947 if (netif_running(dev)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010948 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10949 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010950 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010951
10952 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010953}
10954
/* TX watchdog callback: invoked by the networking core when a TX queue
 * has been stuck for longer than dev->watchdog_timeo */
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	/* In debug builds freeze the driver state for inspection instead
	 * of recovering (only the first time - bp->panic latches) */
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
10966
#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 vlan_flags = 0;

	bp->vlgrp = vlgrp;

	/* Mirror the netdev VLAN offload features into the driver flags */
	if (dev->features & NETIF_F_HW_VLAN_TX)
		vlan_flags |= HW_VLAN_TX_FLAG;
	if (dev->features & NETIF_F_HW_VLAN_RX)
		vlan_flags |= HW_VLAN_RX_FLAG;

	bp->flags = (bp->flags & ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG)) |
		    vlan_flags;

	/* Push the updated client configuration to the chip if it is up */
	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif
10990
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll entry point: run the interrupt handler synchronously with the
 * device IRQ masked so netconsole/kgdboe can make progress even when
 * normal interrupt delivery is unavailable */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
11001
/* net_device callback table; all handlers are defined earlier in this file */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
11019
/* One-time PCI/netdev setup done at probe time: enable the device, map
 * BAR0 (registers) and BAR2 (doorbells), configure DMA masks and
 * advertise netdev features.  On failure everything acquired so far is
 * unwound via the goto ladder at the bottom.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 (registers) and BAR2 (doorbells) must both be memory BARs */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* enable_cnt == 1 means we are the first enabler of this device;
	 * only then request regions and program bus mastering (NOTE(review):
	 * presumably because multiple PCI functions of the chip share the
	 * device - confirm against the rest of the driver) */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* Prefer 64-bit DMA; fall back to 32-bit, fail if neither works */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	/* Map the full register BAR */
	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Map at most BNX2X_DB_SIZE of the doorbell BAR */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	/* HIGHDMA only when the 64-bit DMA mask was accepted above */
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	/* release regions only if we were the one who requested them */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
11168
Eliezer Tamir25047952008-02-28 11:50:16 -080011169static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11170{
11171 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11172
11173 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11174 return val;
11175}
11176
11177/* return value of 1=2.5GHz 2=5GHz */
11178static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11179{
11180 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11181
11182 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11183 return val;
11184}
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011185static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11186{
11187 struct bnx2x_fw_file_hdr *fw_hdr;
11188 struct bnx2x_fw_file_section *sections;
11189 u16 *ops_offsets;
11190 u32 offset, len, num_ops;
11191 int i;
11192 const struct firmware *firmware = bp->firmware;
11193 const u8 * fw_ver;
11194
11195 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11196 return -EINVAL;
11197
11198 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11199 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11200
11201 /* Make sure none of the offsets and sizes make us read beyond
11202 * the end of the firmware data */
11203 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11204 offset = be32_to_cpu(sections[i].offset);
11205 len = be32_to_cpu(sections[i].len);
11206 if (offset + len > firmware->size) {
11207 printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11208 return -EINVAL;
11209 }
11210 }
11211
11212 /* Likewise for the init_ops offsets */
11213 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11214 ops_offsets = (u16 *)(firmware->data + offset);
11215 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11216
11217 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11218 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11219 printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11220 return -EINVAL;
11221 }
11222 }
11223
11224 /* Check FW version */
11225 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11226 fw_ver = firmware->data + offset;
11227 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11228 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11229 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11230 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11231 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11232 " Should be %d.%d.%d.%d\n",
11233 fw_ver[0], fw_ver[1], fw_ver[2],
11234 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11235 BCM_5710_FW_MINOR_VERSION,
11236 BCM_5710_FW_REVISION_VERSION,
11237 BCM_5710_FW_ENGINEERING_VERSION);
11238 return -EINVAL;
11239 }
11240
11241 return 0;
11242}
11243
11244static void inline be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11245{
11246 u32 i;
11247 const __be32 *source = (const __be32*)_source;
11248 u32 *target = (u32*)_target;
11249
11250 for (i = 0; i < n/4; i++)
11251 target[i] = be32_to_cpu(source[i]);
11252}
11253
11254/*
11255 Ops array is stored in the following format:
11256 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11257 */
11258static void inline bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11259{
11260 u32 i, j, tmp;
11261 const __be32 *source = (const __be32*)_source;
11262 struct raw_op *target = (struct raw_op*)_target;
11263
11264 for (i = 0, j = 0; i < n/8; i++, j+=2) {
11265 tmp = be32_to_cpu(source[j]);
11266 target[i].op = (tmp >> 24) & 0xff;
11267 target[i].offset = tmp & 0xffffff;
11268 target[i].raw_data = be32_to_cpu(source[j+1]);
11269 }
11270}
11271static void inline be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11272{
11273 u32 i;
11274 u16 *target = (u16*)_target;
11275 const __be16 *source = (const __be16*)_source;
11276
11277 for (i = 0; i < n/2; i++)
11278 target[i] = be16_to_cpu(source[i]);
11279}
11280
/* Allocate bp->arr and fill it with firmware section 'arr' converted via
 * 'func'; jumps to label 'lbl' on allocation failure.  Relies on 'bp' and
 * 'fw_hdr' being in scope at the expansion site. */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + \
		     be32_to_cpu(fw_hdr->arr.offset), \
		     (u8*)bp->arr, len); \
	} while (0)
11293
11294
11295static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11296{
11297 char fw_file_name[40] = {0};
11298 int rc, offset;
11299 struct bnx2x_fw_file_hdr *fw_hdr;
11300
11301 /* Create a FW file name */
11302 if (CHIP_IS_E1(bp))
11303 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11304 else
11305 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11306
11307 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11308 BCM_5710_FW_MAJOR_VERSION,
11309 BCM_5710_FW_MINOR_VERSION,
11310 BCM_5710_FW_REVISION_VERSION,
11311 BCM_5710_FW_ENGINEERING_VERSION);
11312
11313 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11314
11315 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11316 if (rc) {
11317 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11318 goto request_firmware_exit;
11319 }
11320
11321 rc = bnx2x_check_firmware(bp);
11322 if (rc) {
11323 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11324 goto request_firmware_exit;
11325 }
11326
11327 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11328
11329 /* Initialize the pointers to the init arrays */
11330 /* Blob */
11331 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11332
11333 /* Opcodes */
11334 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11335
11336 /* Offsets */
11337 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11338
11339 /* STORMs firmware */
11340 bp->tsem_int_table_data = bp->firmware->data +
11341 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11342 bp->tsem_pram_data = bp->firmware->data +
11343 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11344 bp->usem_int_table_data = bp->firmware->data +
11345 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11346 bp->usem_pram_data = bp->firmware->data +
11347 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11348 bp->xsem_int_table_data = bp->firmware->data +
11349 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11350 bp->xsem_pram_data = bp->firmware->data +
11351 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11352 bp->csem_int_table_data = bp->firmware->data +
11353 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11354 bp->csem_pram_data = bp->firmware->data +
11355 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11356
11357 return 0;
11358init_offsets_alloc_err:
11359 kfree(bp->init_ops);
11360init_ops_alloc_err:
11361 kfree(bp->init_data);
11362request_firmware_exit:
11363 release_firmware(bp->firmware);
11364
11365 return rc;
11366}
11367
11368
Eliezer Tamir25047952008-02-28 11:50:16 -080011369
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011370static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11371 const struct pci_device_id *ent)
11372{
11373 static int version_printed;
11374 struct net_device *dev = NULL;
11375 struct bnx2x *bp;
Eliezer Tamir25047952008-02-28 11:50:16 -080011376 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011377
11378 if (version_printed++ == 0)
11379 printk(KERN_INFO "%s", version);
11380
11381 /* dev zeroed in init_etherdev */
Eilon Greenstein555f6c72009-02-12 08:36:11 +000011382 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011383 if (!dev) {
11384 printk(KERN_ERR PFX "Cannot allocate net device\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011385 return -ENOMEM;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011386 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011387
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011388 bp = netdev_priv(dev);
11389 bp->msglevel = debug;
11390
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011391 rc = bnx2x_init_dev(pdev, dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011392 if (rc < 0) {
11393 free_netdev(dev);
11394 return rc;
11395 }
11396
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011397 pci_set_drvdata(pdev, dev);
11398
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011399 rc = bnx2x_init_bp(bp);
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000011400 if (rc)
11401 goto init_one_exit;
11402
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011403 /* Set init arrays */
11404 rc = bnx2x_init_firmware(bp, &pdev->dev);
11405 if (rc) {
11406 printk(KERN_ERR PFX "Error loading firmware\n");
11407 goto init_one_exit;
11408 }
11409
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000011410 rc = register_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011411 if (rc) {
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000011412 dev_err(&pdev->dev, "Cannot register net device\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011413 goto init_one_exit;
11414 }
11415
Eliezer Tamir25047952008-02-28 11:50:16 -080011416 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
Eilon Greenstein87942b42009-02-12 08:36:49 +000011417 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011418 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
Eliezer Tamir25047952008-02-28 11:50:16 -080011419 bnx2x_get_pcie_width(bp),
11420 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11421 dev->base_addr, bp->pdev->irq);
Johannes Berge1749612008-10-27 15:59:26 -070011422 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
Eilon Greensteinc0162012009-03-02 08:01:05 +000011423
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011424 return 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011425
11426init_one_exit:
11427 if (bp->regview)
11428 iounmap(bp->regview);
11429
11430 if (bp->doorbells)
11431 iounmap(bp->doorbells);
11432
11433 free_netdev(dev);
11434
11435 if (atomic_read(&pdev->enable_cnt) == 1)
11436 pci_release_regions(pdev);
11437
11438 pci_disable_device(pdev);
11439 pci_set_drvdata(pdev, NULL);
11440
11441 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011442}
11443
/* PCI remove entry point: unregister the netdev and release every
 * resource acquired in bnx2x_init_one()/bnx2x_init_dev(), in reverse
 * order of acquisition */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Free the unpacked init arrays and drop the firmware blob */
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* Release regions only if this function was the one holding them */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
11476
11477static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11478{
11479 struct net_device *dev = pci_get_drvdata(pdev);
Eliezer Tamir228241e2008-02-28 11:56:57 -080011480 struct bnx2x *bp;
11481
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011482 if (!dev) {
11483 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11484 return -ENODEV;
11485 }
Eliezer Tamir228241e2008-02-28 11:56:57 -080011486 bp = netdev_priv(dev);
11487
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011488 rtnl_lock();
11489
11490 pci_save_state(pdev);
11491
11492 if (!netif_running(dev)) {
11493 rtnl_unlock();
11494 return 0;
11495 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011496
11497 netif_device_detach(dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011498
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -070011499 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011500
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011501 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
Eliezer Tamir228241e2008-02-28 11:56:57 -080011502
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011503 rtnl_unlock();
11504
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011505 return 0;
11506}
11507
11508static int bnx2x_resume(struct pci_dev *pdev)
11509{
11510 struct net_device *dev = pci_get_drvdata(pdev);
Eliezer Tamir228241e2008-02-28 11:56:57 -080011511 struct bnx2x *bp;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011512 int rc;
11513
Eliezer Tamir228241e2008-02-28 11:56:57 -080011514 if (!dev) {
11515 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11516 return -ENODEV;
11517 }
Eliezer Tamir228241e2008-02-28 11:56:57 -080011518 bp = netdev_priv(dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011519
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011520 rtnl_lock();
11521
Eliezer Tamir228241e2008-02-28 11:56:57 -080011522 pci_restore_state(pdev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011523
11524 if (!netif_running(dev)) {
11525 rtnl_unlock();
11526 return 0;
11527 }
11528
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011529 bnx2x_set_power_state(bp, PCI_D0);
11530 netif_device_attach(dev);
11531
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -070011532 rc = bnx2x_nic_load(bp, LOAD_OPEN);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011533
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011534 rtnl_unlock();
11535
11536 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011537}
11538
/* Minimal NIC teardown used on a PCI (EEH) error: the hardware may be
 * unreachable, so only driver-side state is torn down - no firmware
 * commands are issued.  Always returns 0. */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* On E1 invalidate the MAC CAM shadow so a later load starts clean */
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
			bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
11578
/* Re-discover the management CPU (MCP) state after an EEH reset:
 * re-read the shared-memory base, validate the MCP signature and
 * resynchronize the firmware mailbox sequence number */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* A shmem base outside [0xA0000, 0xC0000) means no working MCP;
	 * run in MCP-less mode from here on */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	/* Resync the driver/MCP mailbox sequence counter */
	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
11608
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	/* Hardware may be unreachable - use the EEH-safe teardown */
	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
11637
11638/**
11639 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11640 * @pdev: Pointer to PCI device
11641 *
11642 * Restart the card from scratch, as if from a cold-boot.
11643 */
11644static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11645{
11646 struct net_device *dev = pci_get_drvdata(pdev);
11647 struct bnx2x *bp = netdev_priv(dev);
11648
11649 rtnl_lock();
11650
11651 if (pci_enable_device(pdev)) {
11652 dev_err(&pdev->dev,
11653 "Cannot re-enable PCI device after reset\n");
11654 rtnl_unlock();
11655 return PCI_ERS_RESULT_DISCONNECT;
11656 }
11657
11658 pci_set_master(pdev);
11659 pci_restore_state(pdev);
11660
11661 if (netif_running(dev))
11662 bnx2x_set_power_state(bp, PCI_D0);
11663
11664 rtnl_unlock();
11665
11666 return PCI_ERS_RESULT_RECOVERED;
11667}
11668
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	/* Re-establish MCP state before reloading the NIC */
	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
11692
/* PCI error-recovery (EEH/AER) callback table */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
11698
/* PCI driver descriptor registered in bnx2x_init() */
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
11708
11709static int __init bnx2x_init(void)
11710{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000011711 int ret;
11712
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080011713 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11714 if (bnx2x_wq == NULL) {
11715 printk(KERN_ERR PFX "Cannot create workqueue\n");
11716 return -ENOMEM;
11717 }
11718
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000011719 ret = pci_register_driver(&bnx2x_pci_driver);
11720 if (ret) {
11721 printk(KERN_ERR PFX "Cannot register driver\n");
11722 destroy_workqueue(bnx2x_wq);
11723 }
11724 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011725}
11726
11727static void __exit bnx2x_cleanup(void)
11728{
11729 pci_unregister_driver(&bnx2x_pci_driver);
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080011730
11731 destroy_workqueue(bnx2x_wq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011732}
11733
11734module_init(bnx2x_init);
11735module_exit(bnx2x_cleanup);
11736
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011737