blob: 6f16547a1b8a7c45c8789ea4c0154b17f59f29b2 [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Eilon Greensteind05c26c2009-01-17 23:26:13 -08003 * Copyright (c) 2007-2009 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080041#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070045#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020046#include <linux/workqueue.h>
47#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070048#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020049#include <linux/prefetch.h>
50#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020051#include <linux/io.h>
52
Eilon Greenstein359d8b12009-02-12 08:38:25 +000053
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020054#include "bnx2x.h"
55#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070056#include "bnx2x_init_ops.h"
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000057#include "bnx2x_dump.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020058
Eilon Greensteinb2d76442009-10-15 00:19:31 -070059#define DRV_MODULE_VERSION "1.52.1-1"
60#define DRV_MODULE_RELDATE "2009/10/13"
Eilon Greenstein34f80b02008-06-23 20:33:01 -070061#define BNX2X_BC_VER 0x040200
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020062
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070063#include <linux/firmware.h>
64#include "bnx2x_fw_file_hdr.h"
65/* FW files */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000066#define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67#define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070068
Eilon Greenstein34f80b02008-06-23 20:33:01 -070069/* Time in jiffies before concluding the transmitter is hung */
70#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020071
Andrew Morton53a10562008-02-09 23:16:41 -080072static char version[] __devinitdata =
Eilon Greenstein34f80b02008-06-23 20:33:01 -070073 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020074 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
Eilon Greenstein24e3fce2008-06-12 14:30:28 -070076MODULE_AUTHOR("Eliezer Tamir");
Eilon Greensteine47d7e62009-01-14 06:44:28 +000077MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020078MODULE_LICENSE("GPL");
79MODULE_VERSION(DRV_MODULE_VERSION);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020080
/* Module parameters -- load-time tuning/debug knobs (all perm 0: not
 * visible/writable via sysfs after load).
 */
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

/* Per-function load reference counts shared by both ports of a device */
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

/* Driver-private workqueue; created/used elsewhere in this file
 * (not visible in this chunk).
 */
static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200123
/* Supported board types; the value doubles as an index into board_info[]
 * and as the driver_data field of the PCI device table below.
 */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


/* PCI IDs the driver binds to; driver_data carries the board_type */
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
148
149/****************************************************************************
150* General service functions
151****************************************************************************/
152
153/* used only at init
154 * locking is done by mcp
155 */
/* Indirect GRC register write through the PCI configuration window:
 * program the target address, write the data, then park the window
 * back at a benign offset.  The three writes must stay in this order.
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
163
/* Indirect GRC register read, counterpart of bnx2x_reg_wr_ind():
 * select the address, read the data, restore the window.
 */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200175
/* "GO" doorbell register for each of the 16 DMAE command slots;
 * indexed by the slot number passed to bnx2x_post_dmae().
 */
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
182
183/* copy command into DMAE command memory and set DMAE command go */
184static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
185 int idx)
186{
187 u32 cmd_offset;
188 int i;
189
190 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
191 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
192 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
193
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700194 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
195 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200196 }
197 REG_WR(bp, dmae_reg_go_c[idx], 1);
198}
199
/* Copy len32 dwords from host memory (dma_addr) to GRC address dst_addr
 * using the DMAE engine.  If DMAE is not yet ready (early init), falls
 * back to slow indirect register writes.  Completion is detected by
 * polling the slowpath wb_comp word; access to the engine is serialized
 * by bp->dmae_mutex.
 */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;		/* completion poll budget */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* PCI -> GRC copy; completion value written back over PCI */
	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;	/* GRC address in dwords */
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* only one DMAE transaction may be in flight at a time */
	mutex_lock(&bp->dmae_mutex);

	/* clear the completion word before posting the command */
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* poll until the engine writes DMAE_COMP_VAL, or give up */
	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
273
/* Copy len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer using the DMAE engine (GRC -> PCI direction); the mirror image
 * of bnx2x_write_dmae().  Falls back to indirect register reads while
 * DMAE is not ready.  Serialized by bp->dmae_mutex.
 */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;		/* completion poll budget */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* GRC -> PCI copy; completion value written back over PCI */
	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;	/* GRC address in dwords */
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	/* only one DMAE transaction may be in flight at a time */
	mutex_lock(&bp->dmae_mutex);

	/* clear destination buffer and completion word before posting */
	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* poll until the engine writes DMAE_COMP_VAL, or give up */
	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200348
Eilon Greenstein573f2032009-08-12 08:24:14 +0000349void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
350 u32 addr, u32 len)
351{
352 int offset = 0;
353
354 while (len > DMAE_LEN32_WR_MAX) {
355 bnx2x_write_dmae(bp, phys_addr + offset,
356 addr + offset, DMAE_LEN32_WR_MAX);
357 offset += DMAE_LEN32_WR_MAX * 4;
358 len -= DMAE_LEN32_WR_MAX;
359 }
360
361 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
362}
363
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700364/* used only for slowpath so not inlined */
365static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
366{
367 u32 wb_write[2];
368
369 wb_write[0] = val_hi;
370 wb_write[1] = val_lo;
371 REG_WR_DMAE(bp, reg, wb_write, 2);
372}
373
#ifdef USE_WB_RD
/* Read a 64-bit wide-bus register as two dwords via DMAE and combine
 * them.  Compiled out unless USE_WB_RD is defined.
 */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
384
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200385static int bnx2x_mc_assert(struct bnx2x *bp)
386{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200387 char last_idx;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700388 int i, rc = 0;
389 u32 row0, row1, row2, row3;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200390
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700391 /* XSTORM */
392 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
393 XSTORM_ASSERT_LIST_INDEX_OFFSET);
394 if (last_idx)
395 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200396
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700397 /* print the asserts */
398 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200399
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700400 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
401 XSTORM_ASSERT_LIST_OFFSET(i));
402 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
403 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
404 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
405 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
406 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
407 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200408
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700409 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
410 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
411 " 0x%08x 0x%08x 0x%08x\n",
412 i, row3, row2, row1, row0);
413 rc++;
414 } else {
415 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200416 }
417 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700418
419 /* TSTORM */
420 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
421 TSTORM_ASSERT_LIST_INDEX_OFFSET);
422 if (last_idx)
423 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
424
425 /* print the asserts */
426 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
427
428 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
429 TSTORM_ASSERT_LIST_OFFSET(i));
430 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
431 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
432 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
433 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
434 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
435 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
436
437 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
438 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
439 " 0x%08x 0x%08x 0x%08x\n",
440 i, row3, row2, row1, row0);
441 rc++;
442 } else {
443 break;
444 }
445 }
446
447 /* CSTORM */
448 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
449 CSTORM_ASSERT_LIST_INDEX_OFFSET);
450 if (last_idx)
451 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
452
453 /* print the asserts */
454 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
455
456 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
457 CSTORM_ASSERT_LIST_OFFSET(i));
458 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
459 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
460 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
461 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
462 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
463 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
464
465 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
466 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
467 " 0x%08x 0x%08x 0x%08x\n",
468 i, row3, row2, row1, row0);
469 rc++;
470 } else {
471 break;
472 }
473 }
474
475 /* USTORM */
476 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
477 USTORM_ASSERT_LIST_INDEX_OFFSET);
478 if (last_idx)
479 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
480
481 /* print the asserts */
482 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
483
484 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
485 USTORM_ASSERT_LIST_OFFSET(i));
486 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
487 USTORM_ASSERT_LIST_OFFSET(i) + 4);
488 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
489 USTORM_ASSERT_LIST_OFFSET(i) + 8);
490 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
491 USTORM_ASSERT_LIST_OFFSET(i) + 12);
492
493 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
494 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
495 " 0x%08x 0x%08x 0x%08x\n",
496 i, row3, row2, row1, row0);
497 rc++;
498 } else {
499 break;
500 }
501 }
502
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200503 return rc;
504}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800505
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200506static void bnx2x_fw_dump(struct bnx2x *bp)
507{
508 u32 mark, offset;
Eilon Greenstein4781bfa2009-02-12 08:38:17 +0000509 __be32 data[9];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200510 int word;
511
512 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
Eliezer Tamir49d66772008-02-28 11:53:13 -0800513 mark = ((mark + 0x3) & ~0x3);
Joe Perchesad361c92009-07-06 13:05:40 -0700514 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200515
Joe Perchesad361c92009-07-06 13:05:40 -0700516 printk(KERN_ERR PFX);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200517 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
518 for (word = 0; word < 8; word++)
519 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
520 offset + 4*word));
521 data[8] = 0x0;
Eliezer Tamir49d66772008-02-28 11:53:13 -0800522 printk(KERN_CONT "%s", (char *)data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200523 }
524 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
525 for (word = 0; word < 8; word++)
526 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
527 offset + 4*word));
528 data[8] = 0x0;
Eliezer Tamir49d66772008-02-28 11:53:13 -0800529 printk(KERN_CONT "%s", (char *)data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200530 }
Joe Perchesad361c92009-07-06 13:05:40 -0700531 printk(KERN_ERR PFX "end of fw dump\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200532}
533
/* Emergency diagnostic dump: freezes statistics, then prints the default
 * status-block indices, per-queue Rx/Tx producer/consumer indices, a
 * window of each Rx/Tx ring around the current consumer, the firmware
 * text buffer and the storm assert lists.  Called on fatal errors only.
 */
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	/* stop statistics DMA so the dump reflects a stable state */
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* dump a window around the consumer: 10 BDs before it,
		 * 503 after (ring-index arithmetic wraps via RX_BD) */
		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
645
/* Enable chip interrupts through the HC (host coalescing) block for the
 * current port, selecting the delivery mode (MSI-X / MSI / INTx) from
 * bp->flags, then program the attention leading/trailing edge masks on
 * E1H chips.  Barrier placement is deliberate -- see comments below.
 */
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		/* MSI-X: single-ISR and INTx line off, MSI/MSI-X + attn on */
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		/* MSI: INTx line off, single ISR + MSI + attn on */
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		/* INTx: everything on first, then clear the MSI enable with
		 * a second write (two-step sequence) */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
705
/* Mask every HC interrupt source (single-ISR, MSI/MSI-X, INTx and the
 * attention bit) for this port, then read the register back to verify
 * the write actually reached the IGU.
 */
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	/* read back to make sure the disable took effect in the IGU */
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
727
/* Quiesce all interrupt activity: raise intr_sem so the ISRs bail out,
 * optionally mask the HW sources, wait for any in-flight ISRs to finish
 * and make sure the slowpath task is no longer running.
 */
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is the slowpath vector; fastpath vectors follow */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		/* one extra vector is reserved for CNIC */
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
757
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700758/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200759
760/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700761 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200762 */
763
/* Acknowledge a status block to the IGU: report the latest consumed
 * index for the given storm and encode the interrupt mode / update
 * flags in a single 32-bit write to the HC command register.
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
786
787static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
788{
789 struct host_status_block *fpsb = fp->status_blk;
790 u16 rc = 0;
791
792 barrier(); /* status block is written to by the chip */
793 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
794 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
795 rc |= 1;
796 }
797 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
798 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
799 rc |= 2;
800 }
801 return rc;
802}
803
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200804static u16 bnx2x_ack_int(struct bnx2x *bp)
805{
Eilon Greenstein5c862842008-08-13 15:51:48 -0700806 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
807 COMMAND_REG_SIMD_MASK);
808 u32 result = REG_RD(bp, hc_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200809
Eilon Greenstein5c862842008-08-13 15:51:48 -0700810 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
811 result, hc_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200812
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200813 return result;
814}
815
816
817/*
818 * fast path service functions
819 */
820
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -0800821static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
822{
823 /* Tell compiler that consumer and producer can change */
824 barrier();
825 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
Eilon Greenstein237907c2009-01-14 06:42:44 +0000826}
827
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 *
 * Unmaps the start BD, skips the parse BD (and the TSO split-header BD
 * when present, as neither carries a DMA mapping), unmaps every frag BD
 * and releases the skb.
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	/* nbd counts the BDs after the start BD */
	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	/* consumer index of the first BD past this packet */
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
891
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700892static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200893{
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700894 s16 used;
895 u16 prod;
896 u16 cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200897
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700898 barrier(); /* Tell compiler that prod and cons can change */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200899 prod = fp->tx_bd_prod;
900 cons = fp->tx_bd_cons;
901
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700902 /* NUM_TX_RINGS = number of "next-page" entries
903 It will be used as a threshold */
904 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200905
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700906#ifdef BNX2X_STOP_ON_ERROR
Ilpo Järvinen53e5e962008-07-25 21:40:45 -0700907 WARN_ON(used < 0);
908 WARN_ON(used > fp->bp->tx_ring_size);
909 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700910#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200911
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700912 return (s16)(fp->bp->tx_ring_size) - used;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200913}
914
/* Reclaim completed Tx packets up to the HW consumer index and wake the
 * corresponding netdev Tx queue when it was stopped and enough ring
 * space has become available again.
 */
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	/* Tx fastpath queues are indexed after the Rx queues - the netdev
	 * Tx queue number is fp->index shifted down by num_rx_queues */
	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}
971
Michael Chan993ac7b2009-10-10 13:46:56 +0000972#ifdef BCM_CNIC
973static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
974#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -0700975
/* Handle a slowpath (ramrod) completion delivered on an Rx CQE: advance
 * the per-queue or global state machine according to the completed
 * command.  fp->index != 0 means the completion belongs to a
 * non-leading (MULTI) queue; otherwise global bp->state transitions are
 * processed.
 */
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* the ramrod's slot on the slowpath queue is free again */
	bp->spq_left++;

	if (fp->index) {
		/* non-leading queue: ramrods only open/close this queue */
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
1058
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001059static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1060 struct bnx2x_fastpath *fp, u16 index)
1061{
1062 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1063 struct page *page = sw_buf->page;
1064 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1065
1066 /* Skip "next page" elements */
1067 if (!page)
1068 return;
1069
1070 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001071 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001072 __free_pages(page, PAGES_PER_SGE_SHIFT);
1073
1074 sw_buf->page = NULL;
1075 sge->addr_hi = 0;
1076 sge->addr_lo = 0;
1077}
1078
/* Release the SGE pages in ring slots [0, last). */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int idx = 0;

	while (idx < last)
		bnx2x_free_rx_sge(bp, fp, idx++);
}
1087
1088static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1089 struct bnx2x_fastpath *fp, u16 index)
1090{
1091 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1092 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1093 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1094 dma_addr_t mapping;
1095
1096 if (unlikely(page == NULL))
1097 return -ENOMEM;
1098
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001099 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001100 PCI_DMA_FROMDEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001101 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001102 __free_pages(page, PAGES_PER_SGE_SHIFT);
1103 return -ENOMEM;
1104 }
1105
1106 sw_buf->page = page;
1107 pci_unmap_addr_set(sw_buf, mapping, mapping);
1108
1109 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1110 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1111
1112 return 0;
1113}
1114
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001115static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1116 struct bnx2x_fastpath *fp, u16 index)
1117{
1118 struct sk_buff *skb;
1119 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1120 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1121 dma_addr_t mapping;
1122
1123 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1124 if (unlikely(skb == NULL))
1125 return -ENOMEM;
1126
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07001127 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001128 PCI_DMA_FROMDEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001129 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001130 dev_kfree_skb(skb);
1131 return -ENOMEM;
1132 }
1133
1134 rx_buf->skb = skb;
1135 pci_unmap_addr_set(rx_buf, mapping, mapping);
1136
1137 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1138 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1139
1140 return 0;
1141}
1142
1143/* note that we are not allocating a new skb,
1144 * we are just moving one from cons to prod
1145 * we are not creating a new mapping,
1146 * so there is no need to check for dma_mapping_error().
1147 */
1148static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1149 struct sk_buff *skb, u16 cons, u16 prod)
1150{
1151 struct bnx2x *bp = fp->bp;
1152 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1153 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1154 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1155 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1156
1157 pci_dma_sync_single_for_device(bp->pdev,
1158 pci_unmap_addr(cons_rx_buf, mapping),
Eilon Greenstein87942b42009-02-12 08:36:49 +00001159 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001160
1161 prod_rx_buf->skb = cons_rx_buf->skb;
1162 pci_unmap_addr_set(prod_rx_buf, mapping,
1163 pci_unmap_addr(cons_rx_buf, mapping));
1164 *prod_bd = *cons_bd;
1165}
1166
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001167static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1168 u16 idx)
1169{
1170 u16 last_max = fp->last_max_sge;
1171
1172 if (SUB_S16(idx, last_max) > 0)
1173 fp->last_max_sge = idx;
1174}
1175
1176static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1177{
1178 int i, j;
1179
1180 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1181 int idx = RX_SGE_CNT * i - 1;
1182
1183 for (j = 0; j < 2; j++) {
1184 SGE_MASK_CLEAR_BIT(fp, idx);
1185 idx--;
1186 }
1187 }
1188}
1189
/* Walk the SGL of an aggregated packet, mark the SGE pages the FW has
 * consumed and advance rx_sge_prod over every fully-consumed 64-bit
 * mask element.
 */
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	/* number of SGE pages used by the part of the packet beyond the BD */
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		/* stop at the first mask element that is not fully consumed */
		if (likely(fp->sge_mask[i]))
			break;

		/* element fully consumed: refill its mask and credit the
		 * producer with the whole element's worth of SGEs */
		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
1242
1243static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1244{
1245 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1246 memset(fp->sge_mask, 0xff,
1247 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1248
Eilon Greenstein33471622008-08-13 15:59:08 -07001249 /* Clear the two last indices in the page to 1:
1250 these are the indices that correspond to the "next" element,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001251 hence will never be indicated and should be removed from
1252 the calculations. */
1253 bnx2x_clear_sge_mask_next_elems(fp);
1254}
1255
/* Begin a TPA aggregation on bin 'queue': park the partially-filled skb
 * from the cons ring slot in the tpa_pool bin (still mapped) and move
 * the bin's spare skb onto the prod slot so the ring stays full.
 */
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
1294
/* Attach the SGE pages listed in the CQE's SGL to 'skb' as page frags.
 * Every consumed SGE slot is refilled with a fresh page; if a refill
 * fails, the walk stops and a negative errno is returned (the caller
 * drops the packet).  Returns 0 on success.
 */
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	/* bytes carried by the SGEs (beyond what sits on the BD itself) */
	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					   max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
1360
/* Complete a TPA aggregation on bin 'queue': unmap the aggregated skb,
 * fix up its IP checksum, attach the SGE page frags and hand it to the
 * stack.  If no replacement skb can be allocated, the packet is dropped
 * and the old buffer is kept in the bin.  In either case the bin
 * returns to BNX2X_TPA_STOP.
 */
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			/* recompute the IP header checksum of the
			 * FW-aggregated packet */
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
1450
/* Publish new Rx BD/CQE/SGE producer values to the ustorm internal
 * memory so the FW can post further packets.  The wmb() guarantees the
 * ring contents are globally visible before the producers are.
 */
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	/* write the producers structure word-by-word into ustorm memory */
	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
1485
/* Rx fast-path handler: walk the Rx completion queue for up to @budget
 * packets. Slowpath CQEs are handed to bnx2x_sp_event(), TPA CQEs start
 * or stop an aggregation, and regular packets are (optionally copied,)
 * unmapped and passed up the stack. Finally the new producer values are
 * written back to the FW. Returns the number of Rx packets processed.
 */
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	/* Take local snapshots of the ring indices */
	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			/* Sync only the pad + copy-threshold prefix; the
			 * rest stays mapped until we decide copy vs unmap */
			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				/* original buffer stays on the ring */
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				/* Replacement posted - hand the full
				 * buffer up the stack */
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Mark HW checksum result when Rx csum offload is
			 * enabled and the HW validated it */
			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	/* Write back the updated indices */
	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
1717
/* MSI-X fastpath interrupt handler - one vector per fastpath queue.
 * Disables further interrupts for this status block, then either
 * schedules NAPI (Rx queue) or services Tx completions inline and
 * re-enables the interrupt (Tx queue).
 */
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif
	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		/* Rx work is deferred to the NAPI poll context */
		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	} else {
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		/* Tx completions are handled here, in hard-IRQ context */
		bnx2x_update_fpsb_idx(fp);
		rmb();
		bnx2x_tx_int(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return IRQ_HANDLED;
}
1761
/* INTx / single-vector interrupt handler. Reads the aggregated status,
 * dispatches each pending fastpath queue (NAPI for Rx, inline for Tx),
 * forwards CNIC events when compiled in, and defers slowpath events to
 * the slowpath workqueue task.
 */
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Each fastpath SB owns bit (sb_id + 1) of the status word */
	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			if (fp->is_rx_queue) {
				prefetch(fp->rx_cons_sb);
				prefetch(&fp->status_blk->u_status_block.
							status_block_index);

				napi_schedule(&bnx2x_fp(bp, fp->index, napi));

			} else {
				prefetch(fp->tx_cons_sb);
				prefetch(&fp->status_blk->c_status_block.
							status_block_index);

				bnx2x_update_fpsb_idx(fp);
				rmb();
				bnx2x_tx_int(fp);

				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_ENABLE, 1);
			}
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	/* Forward CNIC status-block events to the registered CNIC driver */
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* Bit 0 signals a slowpath event - handled in process context */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
1850
1851/* end of fast path */
1852
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001853static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001854
1855/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001856
1857/*
1858 * General service functions
1859 */
1860
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001861static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001862{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001863 u32 lock_status;
1864 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001865 int func = BP_FUNC(bp);
1866 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001867 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001868
1869 /* Validating that the resource is within range */
1870 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1871 DP(NETIF_MSG_HW,
1872 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1873 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1874 return -EINVAL;
1875 }
1876
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001877 if (func <= 5) {
1878 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1879 } else {
1880 hw_lock_control_reg =
1881 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1882 }
1883
Eliezer Tamirf1410642008-02-28 11:51:50 -08001884 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001885 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001886 if (lock_status & resource_bit) {
1887 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1888 lock_status, resource_bit);
1889 return -EEXIST;
1890 }
1891
Eilon Greenstein46230476b2008-08-25 15:23:30 -07001892 /* Try for 5 second every 5ms */
1893 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001894 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001895 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1896 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001897 if (lock_status & resource_bit)
1898 return 0;
1899
1900 msleep(5);
1901 }
1902 DP(NETIF_MSG_HW, "Timeout\n");
1903 return -EAGAIN;
1904}
1905
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001906static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001907{
1908 u32 lock_status;
1909 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001910 int func = BP_FUNC(bp);
1911 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001912
1913 /* Validating that the resource is within range */
1914 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1915 DP(NETIF_MSG_HW,
1916 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1917 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1918 return -EINVAL;
1919 }
1920
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001921 if (func <= 5) {
1922 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1923 } else {
1924 hw_lock_control_reg =
1925 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1926 }
1927
Eliezer Tamirf1410642008-02-28 11:51:50 -08001928 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001929 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001930 if (!(lock_status & resource_bit)) {
1931 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1932 lock_status, resource_bit);
1933 return -EFAULT;
1934 }
1935
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001936 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001937 return 0;
1938}
1939
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001940/* HW Lock for shared dual port PHYs */
/* Serialize PHY access: take the SW mutex first and then, on boards
 * that require it, the HW MDIO lock as well.
 */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}
1948
/* Drop the PHY locks in the reverse order of acquisition: HW MDIO
 * lock (when held) first, then the SW mutex.
 */
static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
1956
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001957int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1958{
1959 /* The GPIO should be swapped if swap register is set and active */
1960 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1961 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1962 int gpio_shift = gpio_num +
1963 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1964 u32 gpio_mask = (1 << gpio_shift);
1965 u32 gpio_reg;
1966 int value;
1967
1968 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1969 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1970 return -EINVAL;
1971 }
1972
1973 /* read GPIO value */
1974 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1975
1976 /* get the requested pin value */
1977 if ((gpio_reg & gpio_mask) == gpio_mask)
1978 value = 1;
1979 else
1980 value = 0;
1981
1982 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1983
1984 return value;
1985}
1986
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001987int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001988{
1989 /* The GPIO should be swapped if swap register is set and active */
1990 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001991 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001992 int gpio_shift = gpio_num +
1993 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1994 u32 gpio_mask = (1 << gpio_shift);
1995 u32 gpio_reg;
1996
1997 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1998 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1999 return -EINVAL;
2000 }
2001
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002002 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002003 /* read GPIO and mask except the float bits */
2004 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2005
2006 switch (mode) {
2007 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2008 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2009 gpio_num, gpio_shift);
2010 /* clear FLOAT and set CLR */
2011 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2012 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2013 break;
2014
2015 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2016 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2017 gpio_num, gpio_shift);
2018 /* clear FLOAT and set SET */
2019 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2020 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2021 break;
2022
Eilon Greenstein17de50b2008-08-13 15:56:59 -07002023 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002024 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2025 gpio_num, gpio_shift);
2026 /* set FLOAT */
2027 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2028 break;
2029
2030 default:
2031 break;
2032 }
2033
2034 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002035 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002036
2037 return 0;
2038}
2039
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00002040int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2041{
2042 /* The GPIO should be swapped if swap register is set and active */
2043 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2044 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2045 int gpio_shift = gpio_num +
2046 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2047 u32 gpio_mask = (1 << gpio_shift);
2048 u32 gpio_reg;
2049
2050 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2051 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2052 return -EINVAL;
2053 }
2054
2055 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2056 /* read GPIO int */
2057 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2058
2059 switch (mode) {
2060 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2061 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2062 "output low\n", gpio_num, gpio_shift);
2063 /* clear SET and set CLR */
2064 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2065 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2066 break;
2067
2068 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2069 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2070 "output high\n", gpio_num, gpio_shift);
2071 /* clear CLR and set SET */
2072 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2073 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2074 break;
2075
2076 default:
2077 break;
2078 }
2079
2080 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2081 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2082
2083 return 0;
2084}
2085
Eliezer Tamirf1410642008-02-28 11:51:50 -08002086static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2087{
2088 u32 spio_mask = (1 << spio_num);
2089 u32 spio_reg;
2090
2091 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2092 (spio_num > MISC_REGISTERS_SPIO_7)) {
2093 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2094 return -EINVAL;
2095 }
2096
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002097 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002098 /* read SPIO and mask except the float bits */
2099 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2100
2101 switch (mode) {
Eilon Greenstein6378c022008-08-13 15:59:25 -07002102 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002103 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2104 /* clear FLOAT and set CLR */
2105 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2106 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2107 break;
2108
Eilon Greenstein6378c022008-08-13 15:59:25 -07002109 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002110 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2111 /* clear FLOAT and set SET */
2112 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2113 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2114 break;
2115
2116 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2117 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2118 /* set FLOAT */
2119 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2120 break;
2121
2122 default:
2123 break;
2124 }
2125
2126 REG_WR(bp, MISC_REG_SPIO, spio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002127 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002128
2129 return 0;
2130}
2131
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002132static void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002133{
Eilon Greensteinad33ea32009-01-14 21:24:57 -08002134 switch (bp->link_vars.ieee_fc &
2135 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002136 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002137 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002138 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002139 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002140
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002141 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002142 bp->port.advertising |= (ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002143 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002144 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002145
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002146 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002147 bp->port.advertising |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08002148 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002149
Eliezer Tamirf1410642008-02-28 11:51:50 -08002150 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002151 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002152 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002153 break;
2154 }
2155}
2156
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002157static void bnx2x_link_report(struct bnx2x *bp)
2158{
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002159 if (bp->flags & MF_FUNC_DIS) {
Eilon Greenstein2691d512009-08-12 08:22:08 +00002160 netif_carrier_off(bp->dev);
2161 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2162 return;
2163 }
2164
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002165 if (bp->link_vars.link_up) {
Eilon Greenstein35c5f8f2009-10-15 00:19:05 -07002166 u16 line_speed;
2167
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002168 if (bp->state == BNX2X_STATE_OPEN)
2169 netif_carrier_on(bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002170 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2171
Eilon Greenstein35c5f8f2009-10-15 00:19:05 -07002172 line_speed = bp->link_vars.line_speed;
2173 if (IS_E1HMF(bp)) {
2174 u16 vn_max_rate;
2175
2176 vn_max_rate =
2177 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2178 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2179 if (vn_max_rate < line_speed)
2180 line_speed = vn_max_rate;
2181 }
2182 printk("%d Mbps ", line_speed);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002183
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002184 if (bp->link_vars.duplex == DUPLEX_FULL)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002185 printk("full duplex");
2186 else
2187 printk("half duplex");
2188
David S. Millerc0700f92008-12-16 23:53:20 -08002189 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2190 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002191 printk(", receive ");
Eilon Greenstein356e2382009-02-12 08:38:32 +00002192 if (bp->link_vars.flow_ctrl &
2193 BNX2X_FLOW_CTRL_TX)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002194 printk("& transmit ");
2195 } else {
2196 printk(", transmit ");
2197 }
2198 printk("flow control ON");
2199 }
2200 printk("\n");
2201
2202 } else { /* link_down */
2203 netif_carrier_off(bp->dev);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002204 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002205 }
2206}
2207
/* First-time link bring-up during load: choose the requested flow
 * control based on MTU, run the PHY init under the PHY lock (with
 * loopback mode in diagnostics load) and report the result.
 * Returns the bnx2x_phy_init() status, or -EINVAL without MCP.
 */
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		/* Diagnostics load runs the link in loopback */
		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		/* On emulation/FPGA the link may already be up here */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
2242
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002243static void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002244{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002245 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002246 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002247 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002248 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002249
Eilon Greenstein19680c42008-08-13 15:47:33 -07002250 bnx2x_calc_fc_adv(bp);
2251 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00002252 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002253}
2254
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002255static void bnx2x__link_reset(struct bnx2x *bp)
2256{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002257 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002258 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00002259 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002260 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002261 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00002262 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002263}
2264
2265static u8 bnx2x_link_test(struct bnx2x *bp)
2266{
2267 u8 rc;
2268
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002269 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002270 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002271 bnx2x_release_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002272
2273 return rc;
2274}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002275
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002276static void bnx2x_init_port_minmax(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002277{
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002278 u32 r_param = bp->link_vars.line_speed / 8;
2279 u32 fair_periodic_timeout_usec;
2280 u32 t_fair;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002281
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002282 memset(&(bp->cmng.rs_vars), 0,
2283 sizeof(struct rate_shaping_vars_per_port));
2284 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002285
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002286 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2287 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002288
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002289 /* this is the threshold below which no timer arming will occur
2290 1.25 coefficient is for the threshold to be a little bigger
2291 than the real time, to compensate for timer in-accuracy */
2292 bp->cmng.rs_vars.rs_threshold =
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002293 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2294
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002295 /* resolution of fairness timer */
2296 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2297 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2298 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002299
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002300 /* this is the threshold below which we won't arm the timer anymore */
2301 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002302
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002303 /* we multiply by 1e3/8 to get bytes/msec.
2304 We don't want the credits to pass a credit
2305 of the t_fair*FAIR_MEM (algorithm resolution) */
2306 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2307 /* since each tick is 4 usec */
2308 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002309}
2310
Eilon Greenstein2691d512009-08-12 08:22:08 +00002311/* Calculates the sum of vn_min_rates.
2312 It's needed for further normalizing of the min_rates.
2313 Returns:
2314 sum of vn_min_rates.
2315 or
2316 0 - if all the min_rates are 0.
2317 In the later case fainess algorithm should be deactivated.
2318 If not all min_rates are zero then those that are zeroes will be set to 1.
2319 */
2320static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2321{
2322 int all_zero = 1;
2323 int port = BP_PORT(bp);
2324 int vn;
2325
2326 bp->vn_weight_sum = 0;
2327 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2328 int func = 2*vn + port;
2329 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2330 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2331 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2332
2333 /* Skip hidden vns */
2334 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2335 continue;
2336
2337 /* If min rate is zero - set it to 1 */
2338 if (!vn_min_rate)
2339 vn_min_rate = DEF_MIN_RATE;
2340 else
2341 all_zero = 0;
2342
2343 bp->vn_weight_sum += vn_min_rate;
2344 }
2345
2346 /* ... only if all min rates are zeros - disable fairness */
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07002347 if (all_zero) {
2348 bp->cmng.flags.cmng_enables &=
2349 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2350 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2351 " fairness will be disabled\n");
2352 } else
2353 bp->cmng.flags.cmng_enables |=
2354 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
Eilon Greenstein2691d512009-08-12 08:22:08 +00002355}
2356
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002357static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002358{
2359 struct rate_shaping_vars_per_vn m_rs_vn;
2360 struct fairness_vars_per_vn m_fair_vn;
2361 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2362 u16 vn_min_rate, vn_max_rate;
2363 int i;
2364
2365 /* If function is hidden - set min and max to zeroes */
2366 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2367 vn_min_rate = 0;
2368 vn_max_rate = 0;
2369
2370 } else {
2371 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2372 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07002373 /* If min rate is zero - set it to 1 */
2374 if (!vn_min_rate)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002375 vn_min_rate = DEF_MIN_RATE;
2376 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2377 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2378 }
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002379 DP(NETIF_MSG_IFUP,
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07002380 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002381 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002382
2383 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2384 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2385
2386 /* global vn counter - maximal Mbps for this vn */
2387 m_rs_vn.vn_counter.rate = vn_max_rate;
2388
2389 /* quota - number of bytes transmitted in this period */
2390 m_rs_vn.vn_counter.quota =
2391 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2392
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002393 if (bp->vn_weight_sum) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002394 /* credit for each period of the fairness algorithm:
2395 number of bytes in T_FAIR (the vn share the port rate).
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002396 vn_weight_sum should not be larger than 10000, thus
2397 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2398 than zero */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002399 m_fair_vn.vn_credit_delta =
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002400 max((u32)(vn_min_rate * (T_FAIR_COEF /
2401 (8 * bp->vn_weight_sum))),
2402 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002403 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2404 m_fair_vn.vn_credit_delta);
2405 }
2406
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002407 /* Store it to internal memory */
2408 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2409 REG_WR(bp, BAR_XSTRORM_INTMEM +
2410 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2411 ((u32 *)(&m_rs_vn))[i]);
2412
2413 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2414 REG_WR(bp, BAR_XSTRORM_INTMEM +
2415 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2416 ((u32 *)(&m_fair_vn))[i]);
2417}
2418
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002419
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002420/* This function is called upon link interrupt */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002421static void bnx2x_link_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002422{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002423 /* Make sure that we are synced with the current statistics */
2424 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2425
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002426 bnx2x_link_update(&bp->link_params, &bp->link_vars);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002427
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002428 if (bp->link_vars.link_up) {
2429
Eilon Greenstein1c063282009-02-12 08:36:43 +00002430 /* dropless flow control */
Eilon Greensteina18f5122009-08-12 08:23:26 +00002431 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
Eilon Greenstein1c063282009-02-12 08:36:43 +00002432 int port = BP_PORT(bp);
2433 u32 pause_enabled = 0;
2434
2435 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2436 pause_enabled = 1;
2437
2438 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07002439 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
Eilon Greenstein1c063282009-02-12 08:36:43 +00002440 pause_enabled);
2441 }
2442
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002443 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2444 struct host_port_stats *pstats;
2445
2446 pstats = bnx2x_sp(bp, port_stats);
2447 /* reset old bmac stats */
2448 memset(&(pstats->mac_stx[0]), 0,
2449 sizeof(struct mac_stx));
2450 }
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002451 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002452 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2453 }
2454
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002455 /* indicate link status */
2456 bnx2x_link_report(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002457
2458 if (IS_E1HMF(bp)) {
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002459 int port = BP_PORT(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002460 int func;
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002461 int vn;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002462
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002463 /* Set the attention towards other drivers on the same port */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002464 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2465 if (vn == BP_E1HVN(bp))
2466 continue;
2467
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002468 func = ((vn << 1) | port);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002469 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2470 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2471 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002472
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002473 if (bp->link_vars.link_up) {
2474 int i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002475
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002476 /* Init rate shaping and fairness contexts */
2477 bnx2x_init_port_minmax(bp);
2478
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002479 for (vn = VN_0; vn < E1HVN_MAX; vn++)
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002480 bnx2x_init_vn_minmax(bp, 2*vn + port);
2481
2482 /* Store it to internal memory */
2483 for (i = 0;
2484 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2485 REG_WR(bp, BAR_XSTRORM_INTMEM +
2486 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2487 ((u32 *)(&bp->cmng))[i]);
2488 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002489 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002490}
2491
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002492static void bnx2x__link_status_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002493{
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002494 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002495 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002496
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002497 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2498
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002499 if (bp->link_vars.link_up)
2500 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2501 else
2502 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2503
Eilon Greenstein2691d512009-08-12 08:22:08 +00002504 bnx2x_calc_vn_weight_sum(bp);
2505
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002506 /* indicate link status */
2507 bnx2x_link_report(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002508}
2509
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002510static void bnx2x_pmf_update(struct bnx2x *bp)
2511{
2512 int port = BP_PORT(bp);
2513 u32 val;
2514
2515 bp->port.pmf = 1;
2516 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2517
2518 /* enable nig attention */
2519 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2520 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2521 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002522
2523 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002524}
2525
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002526/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002527
2528/* slow path */
2529
2530/*
2531 * General service functions
2532 */
2533
Eilon Greenstein2691d512009-08-12 08:22:08 +00002534/* send the MCP a request, block until there is a reply */
2535u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2536{
2537 int func = BP_FUNC(bp);
2538 u32 seq = ++bp->fw_seq;
2539 u32 rc = 0;
2540 u32 cnt = 1;
2541 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2542
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07002543 mutex_lock(&bp->fw_mb_mutex);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002544 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2545 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2546
2547 do {
2548 /* let the FW do it's magic ... */
2549 msleep(delay);
2550
2551 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2552
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07002553 /* Give the FW up to 5 second (500*10ms) */
2554 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
Eilon Greenstein2691d512009-08-12 08:22:08 +00002555
2556 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2557 cnt*delay, rc, seq);
2558
2559 /* is this a reply to our command? */
2560 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2561 rc &= FW_MSG_CODE_MASK;
2562 else {
2563 /* FW BUG! */
2564 BNX2X_ERR("FW failed to respond!\n");
2565 bnx2x_fw_dump(bp);
2566 rc = 0;
2567 }
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07002568 mutex_unlock(&bp->fw_mb_mutex);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002569
2570 return rc;
2571}
2572
2573static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
Michael Chane665bfd2009-10-10 13:46:54 +00002574static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002575static void bnx2x_set_rx_mode(struct net_device *dev);
2576
/* Quiesce an E1H function that was disabled via MCP/DCC: stop the Tx
 * queues, disable this function in the NIG, and drop carrier.
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */

	/* disable this function's packet path in the NIG */
	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
2588
/* Re-enable an E1H function previously disabled by bnx2x_e1h_disable():
 * re-open the function in the NIG and wake the Tx queues.
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
2603
/* Recompute and reprogram the bandwidth min/max (rate shaping and
 * fairness) configuration for all vns of this port, typically after a
 * DCC bandwidth-allocation event.  Only the PMF writes the shared
 * per-port context to internal memory and signals the sibling
 * functions via general attention.
 */
static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}
2637
/* Handle a Dynamic Control Command (DCC) event from the MCP:
 * enable/disable this PF and/or re-apply bandwidth allocation, then
 * acknowledge the outcome to the MCP.  Any event bits left unhandled
 * cause a DCC_FAILURE report.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}
2674
Michael Chan28912902009-10-10 13:46:53 +00002675/* must be called under the spq lock */
2676static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2677{
2678 struct eth_spe *next_spe = bp->spq_prod_bd;
2679
2680 if (bp->spq_prod_bd == bp->spq_last_bd) {
2681 bp->spq_prod_bd = bp->spq;
2682 bp->spq_prod_idx = 0;
2683 DP(NETIF_MSG_TIMER, "end of spq\n");
2684 } else {
2685 bp->spq_prod_bd++;
2686 bp->spq_prod_idx++;
2687 }
2688 return next_spe;
2689}
2690
/* must be called under the spq lock.
 * Publish the new slow-path queue producer index to XSTORM internal
 * memory so the firmware can consume the newly queued entries.
 */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	/* order the MMIO producer write against subsequent MMIO (mmiowb) */
	mmiowb();
}
2703
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002704/* the slow path queue is odd since completions arrive on the fastpath ring */
2705static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2706 u32 data_hi, u32 data_lo, int common)
2707{
Michael Chan28912902009-10-10 13:46:53 +00002708 struct eth_spe *spe;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002709
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002710 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2711 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002712 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2713 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2714 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2715
2716#ifdef BNX2X_STOP_ON_ERROR
2717 if (unlikely(bp->panic))
2718 return -EIO;
2719#endif
2720
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002721 spin_lock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002722
2723 if (!bp->spq_left) {
2724 BNX2X_ERR("BUG! SPQ ring full!\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002725 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002726 bnx2x_panic();
2727 return -EBUSY;
2728 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08002729
Michael Chan28912902009-10-10 13:46:53 +00002730 spe = bnx2x_sp_get_next(bp);
2731
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002732 /* CID needs port number to be encoded int it */
Michael Chan28912902009-10-10 13:46:53 +00002733 spe->hdr.conn_and_cmd_data =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002734 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2735 HW_CID(bp, cid)));
Michael Chan28912902009-10-10 13:46:53 +00002736 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002737 if (common)
Michael Chan28912902009-10-10 13:46:53 +00002738 spe->hdr.type |=
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002739 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2740
Michael Chan28912902009-10-10 13:46:53 +00002741 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2742 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002743
2744 bp->spq_left--;
2745
Michael Chan28912902009-10-10 13:46:53 +00002746 bnx2x_sp_prod_update(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002747 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002748 return 0;
2749}
2750
2751/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002752static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002753{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002754 u32 i, j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002755 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002756
2757 might_sleep();
2758 i = 100;
2759 for (j = 0; j < i*10; j++) {
2760 val = (1UL << 31);
2761 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2762 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2763 if (val & (1L << 31))
2764 break;
2765
2766 msleep(5);
2767 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002768 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002769 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002770 rc = -EBUSY;
2771 }
2772
2773 return rc;
2774}
2775
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002776/* release split MCP access lock register */
2777static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002778{
2779 u32 val = 0;
2780
2781 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2782}
2783
/* Sample the default status block and refresh the driver's cached
 * indices.  Returns a bitmask of which sections changed:
 * 1 = attention bits, 2 = CSTORM, 4 = USTORM, 8 = XSTORM, 16 = TSTORM.
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
2812
2813/*
2814 * slow path service functions
2815 */
2816
/* Handle newly asserted attention bits: mask them in the AEU (under
 * the per-port HW lock), record them in bp->attn_state, service the
 * hard-wired sources (NIG/link, GPIOs, general attentions), and
 * finally acknowledge them to the HC.  For a NIG attention the NIG
 * interrupt is masked around bnx2x_link_attn() and restored at the
 * end, with the PHY lock held across that whole window.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	/* a bit cannot be asserted twice without a deassert in between */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2912
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002913static inline void bnx2x_fan_failure(struct bnx2x *bp)
2914{
2915 int port = BP_PORT(bp);
2916
2917 /* mark the failure */
2918 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2919 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2920 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2921 bp->link_params.ext_phy_config);
2922
2923 /* log the failure */
2924 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2925 " the driver to shutdown the card to prevent permanent"
2926 " damage. Please contact Dell Support for assistance\n",
2927 bp->dev->name);
2928}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002929
/* Handle attention bits routed through AEU output group 0:
 * SPIO5 (fan failure), GPIO3 (module detect) and the fatal HW block
 * attentions of set 0.
 *
 * @bp:	  driver instance
 * @attn: sig[0] attention bits, already masked with this group's mask
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	/* per-port AEU enable register for output group 0 */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* mask SPIO5 in the AEU so this attention cannot re-fire */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		/* record the failure in shared memory and log it */
		bnx2x_fan_failure(bp);
	}

	/* GPIO3 attention on either function: module detect interrupt,
	 * handled by the link code under the PHY lock */
	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the asserted fatal bits, then bring the driver down */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
2993
2994static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2995{
2996 u32 val;
2997
Eilon Greenstein0626b892009-02-12 08:38:14 +00002998 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002999
3000 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3001 BNX2X_ERR("DB hw attention 0x%x\n", val);
3002 /* DORQ discard attention */
3003 if (val & 0x2)
3004 BNX2X_ERR("FATAL error from DORQ\n");
3005 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003006
3007 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3008
3009 int port = BP_PORT(bp);
3010 int reg_offset;
3011
3012 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3013 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3014
3015 val = REG_RD(bp, reg_offset);
3016 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3017 REG_WR(bp, reg_offset, val);
3018
3019 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00003020 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003021 bnx2x_panic();
3022 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003023}
3024
3025static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3026{
3027 u32 val;
3028
3029 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3030
3031 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3032 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3033 /* CFC error attention */
3034 if (val & 0x2)
3035 BNX2X_ERR("FATAL error from CFC\n");
3036 }
3037
3038 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3039
3040 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3041 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3042 /* RQ_USDMDP_FIFO_OVERFLOW */
3043 if (val & 0x18000)
3044 BNX2X_ERR("FATAL error from PXP\n");
3045 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003046
3047 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3048
3049 int port = BP_PORT(bp);
3050 int reg_offset;
3051
3052 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3053 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3054
3055 val = REG_RD(bp, reg_offset);
3056 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3057 REG_WR(bp, reg_offset, val);
3058
3059 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00003060 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003061 bnx2x_panic();
3062 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003063}
3064
/* Handle attention bits routed through AEU output group 3:
 * general attentions (PMF link event, MC assert, MCP assert) and
 * latched attentions (GRC timeout / reserved).
 *
 * @bp:	  driver instance
 * @attn: sig[3] attention bits, already masked with this group's mask
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* ack the general attention for this function */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* refresh the multi-function config and driver
			 * status from shared memory */
			bp->mf_config = SHMEM_RD(bp,
					   mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			/* become PMF if the MCP just nominated us */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			/* storm processor assert: ack all four general
			 * attentions and stop the driver */
			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			/* management CPU assert: ack and dump MCP FW state */
			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		/* GRC detail registers only exist on E1H */
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* clear all latched attention signals */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3119
/* Dispatch newly deasserted attention bits to the per-group handlers
 * and restore the HC/AEU masks afterwards.
 *
 * @bp:	        driver instance
 * @deasserted: bitmask of dynamic attention groups that deasserted
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	/* snapshot all four attention signal words for this port */
	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	/* for each deasserted group, hand each signal word (masked by
	 * the group's mask) to the matching handler */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	/* clear the handled attention bits in the host coalescing block */
	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	/* re-enable the deasserted lines in the per-port AEU mask,
	 * under the HW lock shared with other agents */
	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	/* finally drop the handled bits from the driver's attn state */
	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3198
3199static void bnx2x_attn_int(struct bnx2x *bp)
3200{
3201 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003202 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3203 attn_bits);
3204 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3205 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003206 u32 attn_state = bp->attn_state;
3207
3208 /* look for changed bits */
3209 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3210 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3211
3212 DP(NETIF_MSG_HW,
3213 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3214 attn_bits, attn_ack, asserted, deasserted);
3215
3216 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003217 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003218
3219 /* handle bits that were raised */
3220 if (asserted)
3221 bnx2x_attn_int_asserted(bp, asserted);
3222
3223 if (deasserted)
3224 bnx2x_attn_int_deasserted(bp, deasserted);
3225}
3226
/* Slowpath work item, queued from the slowpath interrupt handlers.
 * Processes default status block events (HW attentions) and then
 * acknowledges all default SB indices, re-enabling the interrupt
 * only with the final ack.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;


	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	/* status bitmap tells which default SB indices changed */
	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* ack every storm index; only the last ack (TSTORM) re-enables
	 * the IGU interrupt */
	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}
3261
/* MSI-X slowpath interrupt handler.
 *
 * Disables further slowpath interrupts via the SB ack, notifies the
 * CNIC driver (when built in), and defers the actual event handling
 * to the sp_task workqueue item.
 */
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* ack with IGU_INT_DISABLE: interrupts stay off until sp_task
	 * finishes and re-enables them */
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* cnic_ops is RCU-protected; let CNIC see the event */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3295
3296/* end of slow path */
3297
3298/* Statistics */
3299
3300/****************************************************************************
3301* Macros
3302****************************************************************************/
3303
/* The statistics blocks keep 64-bit counters as {hi, lo} pairs of
 * 32-bit words.  The macros below implement 64-bit add/subtract on
 * such pairs; carry and borrow are detected via unsigned wrap-around
 * (after "s_lo += a_lo", "s_lo < a_lo" implies a carry occurred).
 *
 * NOTE: the UPDATE_*/SUB_* helpers below deliberately rely on
 * variables declared in the calling function's scope ("diff", "new",
 * "old", "pstats", "estats", "qstats", "tclient"/"uclient"/"xclient"
 * and their "old_*" counterparts) -- they are usable only inside the
 * statistics-update functions that declare those names.
 */
/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

/* fold the delta of HW counter 's' since the last read into the
 * cumulative port stat 't' (mac_stx[0] = last HW value snapshot,
 * mac_stx[1] = accumulated value) */
#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

/* same idea for NIG counters: accumulate new-old into estats->t */
#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

/* extend the 32-bit HW counter 's' into the 64-bit mac_stx[1].s */
#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

/* accumulate the delta of the little-endian TSTORM client counter
 * 's' into the 64-bit queue stat 't', updating the old snapshot */
#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* as above, for the USTORM client counters */
#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* as above, for the XSTORM client counters */
#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

/* subtract the delta of the USTORM client counter 's' from the
 * 64-bit queue stat 't' */
#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3409
3410/*
3411 * General service functions
3412 */
3413
3414static inline long bnx2x_hilo(u32 *hiref)
3415{
3416 u32 lo = *(hiref + 1);
3417#if (BITS_PER_LONG == 64)
3418 u32 hi = *hiref;
3419
3420 return HILO_U64(hi, lo);
3421#else
3422 return lo;
3423#endif
3424}
3425
3426/*
3427 * Init service functions
3428 */
3429
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003430static void bnx2x_storm_stats_post(struct bnx2x *bp)
3431{
3432 if (!bp->stats_pending) {
3433 struct eth_query_ramrod_data ramrod_data = {0};
Eilon Greensteinde832a52009-02-12 08:36:33 +00003434 int i, rc;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003435
3436 ramrod_data.drv_counter = bp->stats_counter++;
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08003437 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
Eilon Greensteinde832a52009-02-12 08:36:33 +00003438 for_each_queue(bp, i)
3439 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003440
3441 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3442 ((u32 *)&ramrod_data)[1],
3443 ((u32 *)&ramrod_data)[0], 0);
3444 if (rc == 0) {
3445 /* stats ramrod has it's own slot on the spq */
3446 bp->spq_left++;
3447 bp->stats_pending = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003448 }
3449 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003450}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003451
/* Kick off the HW statistics DMAE transfers prepared in
 * bp->slowpath->dmae[] (bp->executer_idx entries).
 *
 * When several commands are queued, a "loader" DMAE command is built
 * that copies the queued commands into the DMAE command memory and
 * chains them (each command's completion triggers the next via the
 * dmae_reg_go_c registers).  With no queued commands but a valid
 * per-function stats address, the single prepared stats_dmae command
 * is posted directly.
 */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* completion marker is set up front; emulation/FPGA skips DMAE */
	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		/* copy the queued commands into DMAE command memory,
		 * starting at the slot after the loader itself */
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		/* NOTE(review): E1 transfers one dword less here --
		 * presumably a chip-specific DMAE quirk; confirm */
		if (CHIP_IS_E1(bp))
			dmae->len--;
		/* completing the loader triggers the first copied cmd */
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
3499
3500static int bnx2x_stats_comp(struct bnx2x *bp)
3501{
3502 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3503 int cnt = 10;
3504
3505 might_sleep();
3506 while (*stats_comp != DMAE_COMP_VAL) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003507 if (!cnt) {
3508 BNX2X_ERR("timeout waiting for stats finished\n");
3509 break;
3510 }
3511 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -07003512 msleep(1);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003513 }
3514 return 1;
3515}
3516
3517/*
3518 * Statistics service functions
3519 */
3520
/* Read the port statistics accumulated by the previous PMF from the
 * port_stx area into this driver's port_stats buffer, using two
 * chained DMAE reads (the area is larger than one DMAE transfer).
 * Only meaningful on an E1H multi-function device that just became
 * PMF; waits synchronously for the transfer to complete.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* common GRC->PCI read opcode; per-command completion mode is
	 * OR-ed in below */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	/* first chunk: up to the DMAE single-read limit; its GRC
	 * completion triggers the second command */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second chunk: the remainder; completes to stats_comp in
	 * host memory so we can poll for the end of the whole read */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3575
3576static void bnx2x_port_stats_init(struct bnx2x *bp)
3577{
3578 struct dmae_command *dmae;
3579 int port = BP_PORT(bp);
3580 int vn = BP_E1HVN(bp);
3581 u32 opcode;
3582 int loader_idx = PMF_DMAE_C(bp);
3583 u32 mac_addr;
3584 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3585
3586 /* sanity */
3587 if (!bp->link_vars.link_up || !bp->port.pmf) {
3588 BNX2X_ERR("BUG!\n");
3589 return;
3590 }
3591
3592 bp->executer_idx = 0;
3593
3594 /* MCP */
3595 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3596 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3597 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3598#ifdef __BIG_ENDIAN
3599 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3600#else
3601 DMAE_CMD_ENDIANITY_DW_SWAP |
3602#endif
3603 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3604 (vn << DMAE_CMD_E1HVN_SHIFT));
3605
3606 if (bp->port.port_stx) {
3607
3608 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3609 dmae->opcode = opcode;
3610 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3611 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3612 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3613 dmae->dst_addr_hi = 0;
3614 dmae->len = sizeof(struct host_port_stats) >> 2;
3615 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3616 dmae->comp_addr_hi = 0;
3617 dmae->comp_val = 1;
3618 }
3619
3620 if (bp->func_stx) {
3621
3622 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3623 dmae->opcode = opcode;
3624 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3625 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3626 dmae->dst_addr_lo = bp->func_stx >> 2;
3627 dmae->dst_addr_hi = 0;
3628 dmae->len = sizeof(struct host_func_stats) >> 2;
3629 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3630 dmae->comp_addr_hi = 0;
3631 dmae->comp_val = 1;
3632 }
3633
3634 /* MAC */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003635 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3636 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3637 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3638#ifdef __BIG_ENDIAN
3639 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3640#else
3641 DMAE_CMD_ENDIANITY_DW_SWAP |
3642#endif
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003643 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3644 (vn << DMAE_CMD_E1HVN_SHIFT));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003645
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07003646 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003647
3648 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3649 NIG_REG_INGRESS_BMAC0_MEM);
3650
3651 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3652 BIGMAC_REGISTER_TX_STAT_GTBYT */
3653 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3654 dmae->opcode = opcode;
3655 dmae->src_addr_lo = (mac_addr +
3656 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3657 dmae->src_addr_hi = 0;
3658 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3659 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3660 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3661 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3662 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3663 dmae->comp_addr_hi = 0;
3664 dmae->comp_val = 1;
3665
3666 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3667 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3668 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3669 dmae->opcode = opcode;
3670 dmae->src_addr_lo = (mac_addr +
3671 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3672 dmae->src_addr_hi = 0;
3673 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003674 offsetof(struct bmac_stats, rx_stat_gr64_lo));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003675 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003676 offsetof(struct bmac_stats, rx_stat_gr64_lo));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003677 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3678 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3679 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3680 dmae->comp_addr_hi = 0;
3681 dmae->comp_val = 1;
3682
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07003683 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003684
3685 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3686
3687 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3688 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3689 dmae->opcode = opcode;
3690 dmae->src_addr_lo = (mac_addr +
3691 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3692 dmae->src_addr_hi = 0;
3693 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3694 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3695 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3696 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3697 dmae->comp_addr_hi = 0;
3698 dmae->comp_val = 1;
3699
3700 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3701 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3702 dmae->opcode = opcode;
3703 dmae->src_addr_lo = (mac_addr +
3704 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3705 dmae->src_addr_hi = 0;
3706 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003707 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003708 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003709 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003710 dmae->len = 1;
3711 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3712 dmae->comp_addr_hi = 0;
3713 dmae->comp_val = 1;
3714
3715 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3716 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3717 dmae->opcode = opcode;
3718 dmae->src_addr_lo = (mac_addr +
3719 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3720 dmae->src_addr_hi = 0;
3721 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003722 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003723 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003724 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003725 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3726 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3727 dmae->comp_addr_hi = 0;
3728 dmae->comp_val = 1;
3729 }
3730
3731 /* NIG */
3732 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003733 dmae->opcode = opcode;
3734 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3735 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3736 dmae->src_addr_hi = 0;
3737 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3738 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3739 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3740 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3741 dmae->comp_addr_hi = 0;
3742 dmae->comp_val = 1;
3743
3744 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3745 dmae->opcode = opcode;
3746 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3747 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3748 dmae->src_addr_hi = 0;
3749 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3750 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3751 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3752 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3753 dmae->len = (2*sizeof(u32)) >> 2;
3754 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3755 dmae->comp_addr_hi = 0;
3756 dmae->comp_val = 1;
3757
3758 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003759 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3760 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3761 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3762#ifdef __BIG_ENDIAN
3763 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3764#else
3765 DMAE_CMD_ENDIANITY_DW_SWAP |
3766#endif
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003767 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3768 (vn << DMAE_CMD_E1HVN_SHIFT));
3769 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3770 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003771 dmae->src_addr_hi = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003772 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3773 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3774 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3775 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3776 dmae->len = (2*sizeof(u32)) >> 2;
3777 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3778 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3779 dmae->comp_val = DMAE_COMP_VAL;
3780
3781 *stats_comp = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003782}
3783
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003784static void bnx2x_func_stats_init(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003785{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003786 struct dmae_command *dmae = &bp->stats_dmae;
3787 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003788
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003789 /* sanity */
3790 if (!bp->func_stx) {
3791 BNX2X_ERR("BUG!\n");
3792 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003793 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003794
3795 bp->executer_idx = 0;
3796 memset(dmae, 0, sizeof(struct dmae_command));
3797
3798 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3799 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3800 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3801#ifdef __BIG_ENDIAN
3802 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3803#else
3804 DMAE_CMD_ENDIANITY_DW_SWAP |
3805#endif
3806 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3807 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3808 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3809 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3810 dmae->dst_addr_lo = bp->func_stx >> 2;
3811 dmae->dst_addr_hi = 0;
3812 dmae->len = sizeof(struct host_func_stats) >> 2;
3813 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3814 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3815 dmae->comp_val = DMAE_COMP_VAL;
3816
3817 *stats_comp = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003818}
3819
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003820static void bnx2x_stats_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003821{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003822 if (bp->port.pmf)
3823 bnx2x_port_stats_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003824
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003825 else if (bp->func_stx)
3826 bnx2x_func_stats_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003827
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003828 bnx2x_hw_stats_post(bp);
3829 bnx2x_storm_stats_post(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003830}
3831
/* Handle becoming the port management function: complete any pending
 * statistics DMAE, refresh the PMF-maintained statistics and start a
 * fresh collection cycle.  The exact ordering of the three calls is
 * significant.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
3838
/* Restart statistics collection: wait out / complete the current
 * DMAE cycle, then kick off a new one.
 */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003844
/* Fold the freshly DMAE-read BigMAC counters (mac_stats.bmac_stats)
 * into the cumulative port statistics, and export the pause-frame
 * counters into bp->eth_stats.
 *
 * NOTE(review): the UPDATE_STAT64() macro appears to reference the
 * 'new', 'pstats' and 'diff' locals by name (defined elsewhere in
 * this file) — do not rename them.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch pair used implicitly by UPDATE_STAT64() */
	struct {
		u32 lo;
		u32 hi;
	} diff;

	/* map each BMAC HW counter onto its accumulated SW counter */
	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	/* same HW counter feeds two SW counters */
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	/* mirror the accumulated pause counters into the eth stats */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
3895
/* Fold the freshly DMAE-read EMAC counters (mac_stats.emac_stats)
 * into the cumulative port statistics, and derive the pause-frame
 * totals (xon + xoff) for bp->eth_stats.
 *
 * NOTE(review): UPDATE_EXTEND_STAT() appears to reference the 'new'
 * and 'pstats' locals by name — do not rename them.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	/* extend each 32-bit EMAC HW counter into its 64-bit SW counter */
	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause frames received = xon received + xoff received */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause frames sent = xon sent + xoff sent */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
3952
/* Update the HW-side (port) statistics after a stats DMAE completed:
 * dispatch to the MAC-specific update, accumulate the NIG deltas, and
 * publish the result into bp->eth_stats.
 *
 * Returns 0 on success, -1 if no MAC type is active (unexpected).
 *
 * NOTE(review): UPDATE_STAT64_NIG() appears to use the 'new', 'old',
 * 'pstats' and 'diff' locals by name — do not rename them.
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch pair used implicitly by UPDATE_STAT64_NIG() */
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	/* NIG counters are deltas against the previously sampled values */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	/* remember this sample as the baseline for the next delta */
	memcpy(old, new, sizeof(struct nig_stats));

	/* export the accumulated MAC statistics block to eth_stats */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	/* mark the port-stats block as consistent (start == end) */
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	/* log whenever the FW-reported NIG timer maximum changes */
	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
4002
/* Aggregate the firmware ("storm") per-client statistics into the
 * per-queue (qstats), per-function (fstats) and global (estats)
 * counters.
 *
 * Returns 0 on success, or a negative value if one of the storms has
 * not yet updated its counters for the current cycle (the caller then
 * retries on the next cycle — see bnx2x_stats_update()).
 *
 * NOTE(review): the UPDATE_EXTEND_*STAT/SUB_EXTEND_USTAT macros appear
 * to use the loop locals ('qstats', 'tclient', 'old_tclient',
 * 'uclient', 'old_uclient', 'xclient', 'old_xclient', 'diff') by name
 * — do not rename them.
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	/* restart accumulation from the saved function-stats baseline;
	 * the trailing 2 u32s (start/end markers) are excluded */
	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? each storm stamps its block with
		 * the stats counter; a stale stamp means this cycle's data
		 * is not ready yet */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		/* rx bytes = broadcast + multicast + unicast */
		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		/* valid bytes snapshot taken before error bytes are added */
		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		/* total includes error bytes */
		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		/* packets dropped for lack of buffers were counted as
		 * received above — subtract them and account them as
		 * no-buffer discards instead */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* tx bytes = unicast + multicast + broadcast */
		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		/* fold this queue's counters into the function totals */
		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		/* and into the global error/discard totals */
		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	/* bad octets are counted by the MAC, not the storms */
	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* publish the function totals into the global eth stats
	 * (again excluding the trailing start/end markers) */
	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* per-port discard counters are only meaningful on the PMF */
	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	/* mark the function-stats block as consistent (start == end) */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
4217
4218static void bnx2x_net_stats_update(struct bnx2x *bp)
4219{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004220 struct bnx2x_eth_stats *estats = &bp->eth_stats;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004221 struct net_device_stats *nstats = &bp->dev->stats;
Eilon Greensteinde832a52009-02-12 08:36:33 +00004222 int i;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004223
4224 nstats->rx_packets =
4225 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4226 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4227 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4228
4229 nstats->tx_packets =
4230 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4231 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4232 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4233
Eilon Greensteinde832a52009-02-12 08:36:33 +00004234 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004235
Eliezer Tamir0e39e642008-02-28 11:54:03 -08004236 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004237
Eilon Greensteinde832a52009-02-12 08:36:33 +00004238 nstats->rx_dropped = estats->mac_discard;
Eilon Greensteinca003922009-08-12 22:53:28 -07004239 for_each_rx_queue(bp, i)
Eilon Greensteinde832a52009-02-12 08:36:33 +00004240 nstats->rx_dropped +=
4241 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4242
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004243 nstats->tx_dropped = 0;
4244
4245 nstats->multicast =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004246 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004247
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004248 nstats->collisions =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004249 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004250
4251 nstats->rx_length_errors =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004252 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4253 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4254 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4255 bnx2x_hilo(&estats->brb_truncate_hi);
4256 nstats->rx_crc_errors =
4257 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4258 nstats->rx_frame_errors =
4259 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4260 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004261 nstats->rx_missed_errors = estats->xxoverflow_discard;
4262
4263 nstats->rx_errors = nstats->rx_length_errors +
4264 nstats->rx_over_errors +
4265 nstats->rx_crc_errors +
4266 nstats->rx_frame_errors +
Eliezer Tamir0e39e642008-02-28 11:54:03 -08004267 nstats->rx_fifo_errors +
4268 nstats->rx_missed_errors;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004269
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004270 nstats->tx_aborted_errors =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004271 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4272 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4273 nstats->tx_carrier_errors =
4274 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004275 nstats->tx_fifo_errors = 0;
4276 nstats->tx_heartbeat_errors = 0;
4277 nstats->tx_window_errors = 0;
4278
4279 nstats->tx_errors = nstats->tx_aborted_errors +
Eilon Greensteinde832a52009-02-12 08:36:33 +00004280 nstats->tx_carrier_errors +
4281 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4282}
4283
4284static void bnx2x_drv_stats_update(struct bnx2x *bp)
4285{
4286 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4287 int i;
4288
4289 estats->driver_xoff = 0;
4290 estats->rx_err_discard_pkt = 0;
4291 estats->rx_skb_alloc_failed = 0;
4292 estats->hw_csum_err = 0;
Eilon Greensteinca003922009-08-12 22:53:28 -07004293 for_each_rx_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +00004294 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4295
4296 estats->driver_xoff += qstats->driver_xoff;
4297 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4298 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4299 estats->hw_csum_err += qstats->hw_csum_err;
4300 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004301}
4302
/* Main statistics update step: once the previous DMAE cycle has
 * completed, refresh HW (PMF only), storm, netdev and driver
 * counters, optionally dump debug state, and post the next round of
 * statistics requests.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* previous DMAE cycle not finished yet — try again later */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* panic if the storm stats stayed stale for 4 consecutive
	 * cycles (stats_pending counts the consecutive failures) */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* verbose per-timer debug dump, enabled via msglevel */
	if (bp->msglevel & NETIF_MSG_TIMER) {
		/* tx fastpaths follow the rx ones in bp->fp[], so queue 0's
		 * tx twin lives at index num_rx_queues */
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
		       " tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
		       " rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
		       "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %lu no_buff_discard %lu "
			"mac_discard %u mac_filter_discard %u "
			"xxovrflow_discard %u brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	/* kick off the next collection cycle */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004370
/* Queue the final DMAE transfer(s) that flush the driver's statistics out
 * to management firmware when statistics collection is being stopped:
 * host port_stats -> shmem port_stx, and (if present) host func_stats ->
 * shmem func_stx.  The descriptors are only built here; they are actually
 * executed by a subsequent bnx2x_hw_stats_post().
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* rebuild the DMAE executer queue from scratch */
	bp->executer_idx = 0;

	/* common PCI->GRC opcode; the completion destination (C_DST) is
	 * filled in per descriptor below
	 */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* if a func stats transfer follows, chain to it via the GRC
		 * loader; otherwise complete directly to the host
		 */
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			/* completion kicks the next loader channel */
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			/* completion is signalled into stats_comp */
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		/* last transfer in the chain - always completes to host */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
4434
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004435static void bnx2x_stats_stop(struct bnx2x *bp)
4436{
4437 int update = 0;
4438
4439 bnx2x_stats_comp(bp);
4440
4441 if (bp->port.pmf)
4442 update = (bnx2x_hw_stats_update(bp) == 0);
4443
4444 update |= (bnx2x_storm_stats_update(bp) == 0);
4445
4446 if (update) {
4447 bnx2x_net_stats_update(bp);
4448
4449 if (bp->port.pmf)
4450 bnx2x_port_stats_stop(bp);
4451
4452 bnx2x_hw_stats_post(bp);
4453 bnx2x_stats_comp(bp);
4454 }
4455}
4456
/* Placeholder action for state-machine transitions that need no work */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
4460
/* Statistics state machine: indexed by [current state][event], each entry
 * names the action to run and the state to move to.  Driven exclusively
 * by bnx2x_stats_handle().
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
4479
4480static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4481{
4482 enum bnx2x_stats_state state = bp->stats_state;
4483
4484 bnx2x_stats_stm[state][event].action(bp);
4485 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4486
Eilon Greenstein89246652009-08-12 08:23:56 +00004487 /* Make sure the state has been "changed" */
4488 smp_wmb();
4489
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004490 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4491 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4492 state, event, bp->stats_state);
4493}
4494
/* DMA the driver's host_port_stats block out to the shmem port_stx area
 * once, giving management firmware a known baseline.  PMF-only; executes
 * the transfer synchronously (post + wait for completion).
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* single PCI->GRC copy of the whole port stats block */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4532
4533static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4534{
4535 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4536 int port = BP_PORT(bp);
4537 int func;
4538 u32 func_stx;
4539
4540 /* sanity */
4541 if (!bp->port.pmf || !bp->func_stx) {
4542 BNX2X_ERR("BUG!\n");
4543 return;
4544 }
4545
4546 /* save our func_stx */
4547 func_stx = bp->func_stx;
4548
4549 for (vn = VN_0; vn < vn_max; vn++) {
4550 func = 2*vn + port;
4551
4552 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4553 bnx2x_func_stats_init(bp);
4554 bnx2x_hw_stats_post(bp);
4555 bnx2x_stats_comp(bp);
4556 }
4557
4558 /* restore our func_stx */
4559 bp->func_stx = func_stx;
4560}
4561
/* Read the function statistics currently held in shmem (func_stx) back
 * into the host func_stats_base buffer via DMAE, so counters accumulated
 * before this driver instance took over are not lost.  Synchronous.
 */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* single GRC->PCI copy: shmem func stats -> host buffer */
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4599
/* One-time statistics initialization on nic load: latch the shmem
 * addresses used for management statistics, snapshot the current NIG
 * counters as the "old" baseline, zero all software accumulators and,
 * on the PMF, write the statistics base blocks out to firmware.
 */
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		/* no MCP - no shmem statistics areas to report into */
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats: baseline the NIG counters so later updates can
	 * report deltas
	 */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats: clear per-queue storm snapshots and counters */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		/* non-PMF: pick up counters already accumulated in shmem */
		bnx2x_func_stats_base_update(bp);
}
4661
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004662static void bnx2x_timer(unsigned long data)
4663{
4664 struct bnx2x *bp = (struct bnx2x *) data;
4665
4666 if (!netif_running(bp->dev))
4667 return;
4668
4669 if (atomic_read(&bp->intr_sem) != 0)
Eliezer Tamirf1410642008-02-28 11:51:50 -08004670 goto timer_restart;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004671
4672 if (poll) {
4673 struct bnx2x_fastpath *fp = &bp->fp[0];
4674 int rc;
4675
Eilon Greenstein7961f792009-03-02 07:59:31 +00004676 bnx2x_tx_int(fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004677 rc = bnx2x_rx_int(fp, 1000);
4678 }
4679
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004680 if (!BP_NOMCP(bp)) {
4681 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004682 u32 drv_pulse;
4683 u32 mcp_pulse;
4684
4685 ++bp->fw_drv_pulse_wr_seq;
4686 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4687 /* TBD - add SYSTEM_TIME */
4688 drv_pulse = bp->fw_drv_pulse_wr_seq;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004689 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004690
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004691 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004692 MCP_PULSE_SEQ_MASK);
4693 /* The delta between driver pulse and mcp response
4694 * should be 1 (before mcp response) or 0 (after mcp response)
4695 */
4696 if ((drv_pulse != mcp_pulse) &&
4697 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4698 /* someone lost a heartbeat... */
4699 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4700 drv_pulse, mcp_pulse);
4701 }
4702 }
4703
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07004704 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004705 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004706
Eliezer Tamirf1410642008-02-28 11:51:50 -08004707timer_restart:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004708 mod_timer(&bp->timer, jiffies + bp->current_interval);
4709}
4710
4711/* end of Statistics */
4712
4713/* nic init */
4714
4715/*
4716 * nic init service functions
4717 */
4718
/* Zero the per-queue USTORM and CSTORM status block areas for sb_id
 * (both live in CSTORM fast memory in this FW layout).
 */
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}
4731
/* Program a fastpath status block: point the USTORM and CSTORM halves at
 * their host DMA addresses, bind them to this function, start with all HC
 * indices disabled, and finally enable interrupts for the block.
 */
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	/* host address of the USTORM half (low/high dwords) */
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	/* disable all host-coalescing indices until coalescing is set up */
	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	/* ack to clear the block and enable its interrupt */
	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4776
/* Zero this function's default (slowpath) status block areas in all four
 * storms' fast memory.
 */
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	/* both the U and C default blocks live in CSTORM memory */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
4794
/* Program the default (slowpath) status block: capture the attention
 * group masks, register the attention block address with the HC, then
 * point each storm's default block at its host DMA address with all HC
 * indices disabled, and finally enable the block's interrupt.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	/* snapshot the AEU enable masks for each dynamic attention group */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	/* tell the HC where to DMA attention messages */
	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	/* disable all host-coalescing indices of this storm */
	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	/* ack to clear the block and enable its interrupt */
	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4910
/* Program the interrupt-coalescing timeouts for every queue's Rx and Tx
 * HC indices.  A timeout of 0 disables coalescing for that index (the
 * DISABLE register is written 1), otherwise it is enabled.  The /12
 * scaling converts the usec values into HC timeout units.
 */
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}
4940
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004941static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4942 struct bnx2x_fastpath *fp, int last)
4943{
4944 int i;
4945
4946 for (i = 0; i < last; i++) {
4947 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4948 struct sk_buff *skb = rx_buf->skb;
4949
4950 if (skb == NULL) {
4951 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4952 continue;
4953 }
4954
4955 if (fp->tpa_state[i] == BNX2X_TPA_START)
4956 pci_unmap_single(bp->pdev,
4957 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein356e2382009-02-12 08:38:32 +00004958 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004959
4960 dev_kfree_skb(skb);
4961 rx_buf->skb = NULL;
4962 }
4963}
4964
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004965static void bnx2x_init_rx_rings(struct bnx2x *bp)
4966{
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004967 int func = BP_FUNC(bp);
Eilon Greenstein32626232008-08-13 15:51:07 -07004968 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4969 ETH_MAX_AGGREGATION_QUEUES_E1H;
4970 u16 ring_prod, cqe_ring_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004971 int i, j;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004972
Eilon Greenstein87942b42009-02-12 08:36:49 +00004973 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
Eilon Greenstein0f008462009-02-12 08:36:18 +00004974 DP(NETIF_MSG_IFUP,
4975 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004976
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004977 if (bp->flags & TPA_ENABLE_FLAG) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004978
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004979 for_each_rx_queue(bp, j) {
Eilon Greenstein32626232008-08-13 15:51:07 -07004980 struct bnx2x_fastpath *fp = &bp->fp[j];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004981
Eilon Greenstein32626232008-08-13 15:51:07 -07004982 for (i = 0; i < max_agg_queues; i++) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004983 fp->tpa_pool[i].skb =
4984 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4985 if (!fp->tpa_pool[i].skb) {
4986 BNX2X_ERR("Failed to allocate TPA "
4987 "skb pool for queue[%d] - "
4988 "disabling TPA on this "
4989 "queue!\n", j);
4990 bnx2x_free_tpa_pool(bp, fp, i);
4991 fp->disable_tpa = 1;
4992 break;
4993 }
4994 pci_unmap_addr_set((struct sw_rx_bd *)
4995 &bp->fp->tpa_pool[i],
4996 mapping, 0);
4997 fp->tpa_state[i] = BNX2X_TPA_STOP;
4998 }
4999 }
5000 }
5001
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005002 for_each_rx_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005003 struct bnx2x_fastpath *fp = &bp->fp[j];
5004
5005 fp->rx_bd_cons = 0;
5006 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005007 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005008
Eilon Greensteinca003922009-08-12 22:53:28 -07005009 /* Mark queue as Rx */
5010 fp->is_rx_queue = 1;
5011
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005012 /* "next page" elements initialization */
5013 /* SGE ring */
5014 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5015 struct eth_rx_sge *sge;
5016
5017 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5018 sge->addr_hi =
5019 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5020 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5021 sge->addr_lo =
5022 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5023 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5024 }
5025
5026 bnx2x_init_sge_ring_bit_mask(fp);
5027
5028 /* RX BD ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005029 for (i = 1; i <= NUM_RX_RINGS; i++) {
5030 struct eth_rx_bd *rx_bd;
5031
5032 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5033 rx_bd->addr_hi =
5034 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005035 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005036 rx_bd->addr_lo =
5037 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005038 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005039 }
5040
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005041 /* CQ ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005042 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5043 struct eth_rx_cqe_next_page *nextpg;
5044
5045 nextpg = (struct eth_rx_cqe_next_page *)
5046 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5047 nextpg->addr_hi =
5048 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005049 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005050 nextpg->addr_lo =
5051 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005052 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005053 }
5054
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005055 /* Allocate SGEs and initialize the ring elements */
5056 for (i = 0, ring_prod = 0;
5057 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005058
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005059 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5060 BNX2X_ERR("was only able to allocate "
5061 "%d rx sges\n", i);
5062 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5063 /* Cleanup already allocated elements */
5064 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
Eilon Greenstein32626232008-08-13 15:51:07 -07005065 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005066 fp->disable_tpa = 1;
5067 ring_prod = 0;
5068 break;
5069 }
5070 ring_prod = NEXT_SGE_IDX(ring_prod);
5071 }
5072 fp->rx_sge_prod = ring_prod;
5073
5074 /* Allocate BDs and initialize BD ring */
Yitchak Gertner66e855f2008-08-13 15:49:05 -07005075 fp->rx_comp_cons = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005076 cqe_ring_prod = ring_prod = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005077 for (i = 0; i < bp->rx_ring_size; i++) {
5078 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5079 BNX2X_ERR("was only able to allocate "
Eilon Greensteinde832a52009-02-12 08:36:33 +00005080 "%d rx skbs on queue[%d]\n", i, j);
5081 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005082 break;
5083 }
5084 ring_prod = NEXT_RX_IDX(ring_prod);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005085 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
Ilpo Järvinen53e5e962008-07-25 21:40:45 -07005086 WARN_ON(ring_prod <= i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005087 }
5088
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005089 fp->rx_bd_prod = ring_prod;
5090 /* must not have more available CQEs than BDs */
5091 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5092 cqe_ring_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005093 fp->rx_pkt = fp->rx_calls = 0;
5094
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005095 /* Warning!
5096 * this will generate an interrupt (to the TSTORM)
5097 * must only be done after chip is initialized
5098 */
5099 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5100 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005101 if (j != 0)
5102 continue;
5103
5104 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005105 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005106 U64_LO(fp->rx_comp_mapping));
5107 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005108 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005109 U64_HI(fp->rx_comp_mapping));
5110 }
5111}
5112
/* Initialize the Tx descriptor rings and per-queue Tx state.
 *
 * For every Tx queue, link the NUM_TX_RINGS descriptor pages into a
 * circular chain via each page's last ("next") BD, reset the doorbell
 * shadow and reset all producer/consumer indices to zero.
 */
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		/* Last BD of page i points at page (i % NUM_TX_RINGS),
		 * i.e. the last page wraps back to page 0.
		 */
		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		/* Reset the doorbell shadow before any Tx traffic */
		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		/* index into the status block where the FW reports Tx CQ */
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}

	/* clean tx statistics */
	/* NOTE(review): this iterates the Rx-queue fastpaths, which also
	 * carry a tx_pkt counter in this driver version — presumably
	 * intentional given the shared-client scheme; confirm if changed. */
	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, tx_pkt) = 0;
}
5148
/* Initialize the slow-path (SPQ) ring.
 *
 * Resets the software producer state and tells the XSTORM where the
 * single SPQ page lives and what the current producer index is.
 */
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	/* full ring available; producer starts at slot/index 0 */
	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	/* publish the SPQ page base (lo then hi dword) to the XSTORM */
	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
5170
5171static void bnx2x_init_context(struct bnx2x *bp)
5172{
5173 int i;
5174
Eilon Greensteinca003922009-08-12 22:53:28 -07005175 for_each_rx_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005176 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5177 struct bnx2x_fastpath *fp = &bp->fp[i];
Eilon Greensteinde832a52009-02-12 08:36:33 +00005178 u8 cl_id = fp->cl_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005179
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005180 context->ustorm_st_context.common.sb_index_numbers =
5181 BNX2X_RX_SB_INDEX_NUM;
Eilon Greenstein0626b892009-02-12 08:38:14 +00005182 context->ustorm_st_context.common.clientId = cl_id;
Eilon Greensteinca003922009-08-12 22:53:28 -07005183 context->ustorm_st_context.common.status_block_id = fp->sb_id;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005184 context->ustorm_st_context.common.flags =
Eilon Greensteinde832a52009-02-12 08:36:33 +00005185 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5186 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5187 context->ustorm_st_context.common.statistics_counter_id =
5188 cl_id;
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005189 context->ustorm_st_context.common.mc_alignment_log_size =
Eilon Greenstein0f008462009-02-12 08:36:18 +00005190 BNX2X_RX_ALIGN_SHIFT;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005191 context->ustorm_st_context.common.bd_buff_size =
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07005192 bp->rx_buf_size;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005193 context->ustorm_st_context.common.bd_page_base_hi =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005194 U64_HI(fp->rx_desc_mapping);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005195 context->ustorm_st_context.common.bd_page_base_lo =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005196 U64_LO(fp->rx_desc_mapping);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005197 if (!fp->disable_tpa) {
5198 context->ustorm_st_context.common.flags |=
Eilon Greensteinca003922009-08-12 22:53:28 -07005199 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005200 context->ustorm_st_context.common.sge_buff_size =
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005201 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5202 (u32)0xffff);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005203 context->ustorm_st_context.common.sge_page_base_hi =
5204 U64_HI(fp->rx_sge_mapping);
5205 context->ustorm_st_context.common.sge_page_base_lo =
5206 U64_LO(fp->rx_sge_mapping);
Eilon Greensteinca003922009-08-12 22:53:28 -07005207
5208 context->ustorm_st_context.common.max_sges_for_packet =
5209 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5210 context->ustorm_st_context.common.max_sges_for_packet =
5211 ((context->ustorm_st_context.common.
5212 max_sges_for_packet + PAGES_PER_SGE - 1) &
5213 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005214 }
5215
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005216 context->ustorm_ag_context.cdu_usage =
5217 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5218 CDU_REGION_NUMBER_UCM_AG,
5219 ETH_CONNECTION_TYPE);
5220
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005221 context->xstorm_ag_context.cdu_reserved =
5222 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5223 CDU_REGION_NUMBER_XCM_AG,
5224 ETH_CONNECTION_TYPE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005225 }
Eilon Greensteinca003922009-08-12 22:53:28 -07005226
5227 for_each_tx_queue(bp, i) {
5228 struct bnx2x_fastpath *fp = &bp->fp[i];
5229 struct eth_context *context =
5230 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5231
5232 context->cstorm_st_context.sb_index_number =
5233 C_SB_ETH_TX_CQ_INDEX;
5234 context->cstorm_st_context.status_block_id = fp->sb_id;
5235
5236 context->xstorm_st_context.tx_bd_page_base_hi =
5237 U64_HI(fp->tx_desc_mapping);
5238 context->xstorm_st_context.tx_bd_page_base_lo =
5239 U64_LO(fp->tx_desc_mapping);
5240 context->xstorm_st_context.statistics_data = (fp->cl_id |
5241 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5242 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005243}
5244
/* Program the RSS indirection table in TSTORM internal memory.
 *
 * Each of the TSTORM_INDIRECTION_TABLE_SIZE slots is filled with a
 * client ID, spreading flows round-robin across the Rx queues.
 * No-op when RSS is disabled.
 */
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}
5260
/* Write the per-client Tstorm configuration (MTU, statistics enable,
 * VLAN/E1HOV tag removal) for every fastpath client of this port.
 *
 * The 8-byte tstorm_eth_client_config structure is written as two
 * 32-bit dwords into TSTORM internal memory per client.
 */
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	/* NB: "STATSITICS" is the (misspelled) name of the FW constant */
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	/* strip VLAN tags in HW only when Rx is active and a VLAN group
	 * is registered with HW acceleration enabled */
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
5293
/* Apply bp->rx_mode to the hardware Rx filtering.
 *
 * Builds the Tstorm MAC filter accept/drop masks for the requested mode
 * (none/normal/allmulti/promisc), programs the NIG LLH BRB mask for the
 * port, writes the filter config to TSTORM internal memory and, unless
 * Rx is fully disabled, refreshes the per-client configuration.
 */
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		/* unicast filtered by MAC, broadcast accepted */
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	/* program the LLH mask of the NIG block for this port */
	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	/* copy the filter structure dword-by-dword into TSTORM memory */
	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
5356
Eilon Greenstein471de712008-08-13 15:49:35 -07005357static void bnx2x_init_internal_common(struct bnx2x *bp)
5358{
5359 int i;
5360
5361 /* Zero this manually as its initialization is
5362 currently missing in the initTool */
5363 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5364 REG_WR(bp, BAR_USTRORM_INTMEM +
5365 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5366}
5367
/* Per-port internal memory init: program the HC "baseline tick rate"
 * (BTR) value into the CSTORM (both USB and CSB copies), TSTORM and
 * XSTORM internal memories for this port.
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
5379
/* Per-function internal memory init.
 *
 * Programs the Tstorm common function config (RSS, TPA, E1H outer VLAN),
 * sets the initial Rx mode to NONE, clears the per-client statistics in
 * all storms, registers the FW statistics query address, programs the
 * CQE page bases and aggregation limits per Rx queue, configures
 * dropless flow control thresholds (E1H), and initializes the rate
 * shaping / fairness (cmng) context.
 */
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	/* in E1H multi-function mode the outer VLAN lives in the CAM */
	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	/* zero the per-client statistics of every storm for each client */
	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	/* tell each storm where to DMA the statistics query results */
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		/* publish single vs multi function mode to all storms */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			/* SGE thresholds only matter when TPA is active */
			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}


			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}


	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
5594
Eilon Greenstein471de712008-08-13 15:49:35 -07005595static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5596{
5597 switch (load_code) {
5598 case FW_MSG_CODE_DRV_LOAD_COMMON:
5599 bnx2x_init_internal_common(bp);
5600 /* no break */
5601
5602 case FW_MSG_CODE_DRV_LOAD_PORT:
5603 bnx2x_init_internal_port(bp);
5604 /* no break */
5605
5606 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5607 bnx2x_init_internal_func(bp);
5608 break;
5609
5610 default:
5611 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5612 break;
5613 }
5614}
5615
/* Top-level NIC software/firmware initialization.
 *
 * Sets up every fastpath (client/SB IDs, status blocks), the default
 * status block, all rings (Rx, Tx, slow-path), connection contexts,
 * internal storm memories and the RSS indirection table, then enables
 * interrupts.  Must be called only after the chip HW init completed.
 */
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		/* with CNIC, SB 0 of the range is reserved for iSCSI */
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		/* Suitable Rx and Tx SBs are served by the same client */
		if (i >= bp->num_rx_queues)
			fp->cl_id -= bp->num_rx_queues;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
5673
5674/* end of nic init */
5675
5676/*
5677 * gzip service functions
5678 */
5679
/* Allocate the resources needed to decompress firmware images:
 * a DMA-coherent output buffer (FW_BUF_SIZE), a zlib stream object
 * and its inflate workspace.
 *
 * Returns 0 on success or -ENOMEM, releasing any partial allocations
 * via the goto-cleanup chain.  Paired with bnx2x_gunzip_end().
 */
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}
5712
5713static void bnx2x_gunzip_end(struct bnx2x *bp)
5714{
5715 kfree(bp->strm->workspace);
5716
5717 kfree(bp->strm);
5718 bp->strm = NULL;
5719
5720 if (bp->gunzip_buf) {
5721 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5722 bp->gunzip_mapping);
5723 bp->gunzip_buf = NULL;
5724 }
5725}
5726
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005727static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005728{
5729 int n, rc;
5730
5731 /* check gzip header */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005732 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5733 BNX2X_ERR("Bad gzip header\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005734 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005735 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005736
5737 n = 10;
5738
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005739#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005740
5741 if (zbuf[3] & FNAME)
5742 while ((zbuf[n++] != 0) && (n < len));
5743
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005744 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005745 bp->strm->avail_in = len - n;
5746 bp->strm->next_out = bp->gunzip_buf;
5747 bp->strm->avail_out = FW_BUF_SIZE;
5748
5749 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5750 if (rc != Z_OK)
5751 return rc;
5752
5753 rc = zlib_inflate(bp->strm, Z_FINISH);
5754 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5755 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5756 bp->dev->name, bp->strm->msg);
5757
5758 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5759 if (bp->gunzip_outlen & 0x3)
5760 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5761 " gunzip_outlen (%d) not aligned\n",
5762 bp->dev->name, bp->gunzip_outlen);
5763 bp->gunzip_outlen >>= 2;
5764
5765 zlib_inflateEnd(bp->strm);
5766
5767 if (rc == Z_STREAM_END)
5768 return 0;
5769
5770 return rc;
5771}
5772
5773/* nic load/unload */
5774
5775/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005776 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005777 */
5778
5779/* send a NIG loopback debug packet */
5780static void bnx2x_lb_pckt(struct bnx2x *bp)
5781{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005782 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005783
5784 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005785 wb_write[0] = 0x55555555;
5786 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005787 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005788 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005789
5790 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005791 wb_write[0] = 0x09000000;
5792 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005793 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005794 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005795}
5796
/* Self-test of internal memories that are not directly readable from
 * the driver: debug packets are pushed through the NIG loopback path
 * and the NIG/PRS statistics are polled to verify they arrived.
 *
 * Returns 0 on success, a negative step-specific code (-1..-4) on the
 * first failed stage. Poll timeouts are scaled by a platform factor
 * (FPGA/emulation run much slower than silicon).
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* scale every timeout for slow platforms */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
5948
/* Unmask (write 0 to) the per-block interrupt mask registers so the
 * HW blocks can raise attention interrupts; the commented-out SEM/MISC
 * masks are intentionally left masked. PXP2 and PBF get non-zero masks
 * to keep specific noisy bits suppressed.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	/* PXP2 mask differs on FPGA - presumably bit 20 vs 0x100000+0x480000
	 * split; exact bit meaning per chip spec - TODO confirm */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
5987
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005988
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00005989static void bnx2x_reset_common(struct bnx2x *bp)
5990{
5991 /* reset_common */
5992 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5993 0xd3ffff7f);
5994 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5995}
5996
Eilon Greenstein573f2032009-08-12 08:24:14 +00005997static void bnx2x_init_pxp(struct bnx2x *bp)
5998{
5999 u16 devctl;
6000 int r_order, w_order;
6001
6002 pci_read_config_word(bp->pdev,
6003 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6004 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6005 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6006 if (bp->mrrs == -1)
6007 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6008 else {
6009 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6010 r_order = bp->mrrs;
6011 }
6012
6013 bnx2x_init_pxp_arb(bp, r_order, w_order);
6014}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00006015
/* Enable fan-failure detection via SPIO 5 when the shared HW config
 * requires it - either explicitly (FAN_FAILURE_ENABLED) or implicitly
 * by the external PHY type of either port. Does nothing otherwise.
 */
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
					SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
6068
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006069static int bnx2x_init_common(struct bnx2x *bp)
6070{
6071 u32 val, i;
Michael Chan37b091b2009-10-10 13:46:55 +00006072#ifdef BCM_CNIC
6073 u32 wb_write[2];
6074#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006075
6076 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6077
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00006078 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006079 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6080 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6081
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006082 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006083 if (CHIP_IS_E1H(bp))
6084 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6085
6086 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6087 msleep(30);
6088 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6089
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006090 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006091 if (CHIP_IS_E1(bp)) {
6092 /* enable HW interrupt from PXP on USDM overflow
6093 bit 16 on INT_MASK_0 */
6094 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006095 }
6096
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006097 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006098 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006099
6100#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006101 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6102 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6103 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6104 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6105 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00006106 /* make sure this value is 0 */
6107 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006108
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006109/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6110 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6111 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6112 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6113 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006114#endif
6115
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006116 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
Michael Chan37b091b2009-10-10 13:46:55 +00006117#ifdef BCM_CNIC
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006118 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6119 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6120 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006121#endif
6122
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006123 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6124 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006125
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006126 /* let the HW do it's magic ... */
6127 msleep(100);
6128 /* finish PXP init */
6129 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6130 if (val != 1) {
6131 BNX2X_ERR("PXP2 CFG failed\n");
6132 return -EBUSY;
6133 }
6134 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6135 if (val != 1) {
6136 BNX2X_ERR("PXP2 RD_INIT failed\n");
6137 return -EBUSY;
6138 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006139
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006140 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6141 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006142
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006143 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006144
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006145 /* clean the DMAE memory */
6146 bp->dmae_ready = 1;
6147 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006148
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006149 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6150 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6151 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6152 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006153
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006154 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6155 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6156 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6157 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6158
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006159 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00006160
6161#ifdef BCM_CNIC
6162 wb_write[0] = 0;
6163 wb_write[1] = 0;
6164 for (i = 0; i < 64; i++) {
6165 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6166 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6167
6168 if (CHIP_IS_E1H(bp)) {
6169 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6170 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6171 wb_write, 2);
6172 }
6173 }
6174#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006175 /* soft reset pulse */
6176 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6177 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006178
Michael Chan37b091b2009-10-10 13:46:55 +00006179#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006180 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006181#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006182
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006183 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006184 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6185 if (!CHIP_REV_IS_SLOW(bp)) {
6186 /* enable hw interrupt from doorbell Q */
6187 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6188 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006189
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006190 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6191 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08006192 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00006193#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07006194 /* set NIC mode */
6195 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00006196#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006197 if (CHIP_IS_E1H(bp))
6198 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006199
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006200 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6201 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6202 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6203 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006204
Eilon Greensteinca003922009-08-12 22:53:28 -07006205 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6206 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6207 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6208 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006209
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006210 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6211 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6212 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6213 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006214
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006215 /* sync semi rtc */
6216 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6217 0x80000000);
6218 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6219 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006220
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006221 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6222 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6223 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006224
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006225 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6226 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6227 REG_WR(bp, i, 0xc0cac01a);
6228 /* TODO: replace with something meaningful */
6229 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006230 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00006231#ifdef BCM_CNIC
6232 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6233 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6234 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6235 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6236 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6237 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6238 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6239 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6240 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6241 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6242#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006243 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006244
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006245 if (sizeof(union cdu_context) != 1024)
6246 /* we currently assume that a context is 1024 bytes */
6247 printk(KERN_ALERT PFX "please adjust the size of"
6248 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006249
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006250 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006251 val = (4 << 24) + (0 << 12) + 1024;
6252 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006253
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006254 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006255 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08006256 /* enable context validation interrupt from CFC */
6257 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6258
6259 /* set the thresholds to prevent CFC/CDU race */
6260 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006261
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006262 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6263 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006264
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006265 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006266 /* Reset PCIE errors for debug */
6267 REG_WR(bp, 0x2814, 0xffffffff);
6268 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006269
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006270 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006271 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006272 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006273 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006274
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006275 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006276 if (CHIP_IS_E1H(bp)) {
6277 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6278 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6279 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006280
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006281 if (CHIP_REV_IS_SLOW(bp))
6282 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006283
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006284 /* finish CFC init */
6285 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6286 if (val != 1) {
6287 BNX2X_ERR("CFC LL_INIT failed\n");
6288 return -EBUSY;
6289 }
6290 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6291 if (val != 1) {
6292 BNX2X_ERR("CFC AC_INIT failed\n");
6293 return -EBUSY;
6294 }
6295 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6296 if (val != 1) {
6297 BNX2X_ERR("CFC CAM_INIT failed\n");
6298 return -EBUSY;
6299 }
6300 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006301
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006302 /* read NIG statistic
6303 to see if this is our first up since powerup */
6304 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6305 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006306
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006307 /* do internal memory self test */
6308 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6309 BNX2X_ERR("internal mem self test failed\n");
6310 return -EBUSY;
6311 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006312
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006313 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
Eilon Greenstein46c6a672009-02-12 08:36:58 +00006314 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6315 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6316 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006317 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eilon Greenstein46c6a672009-02-12 08:36:58 +00006318 bp->port.need_hw_lock = 1;
6319 break;
6320
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006321 default:
6322 break;
6323 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08006324
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00006325 bnx2x_setup_fan_failure_detection(bp);
6326
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006327 /* clear PXP2 attentions */
6328 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006329
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006330 enable_blocks_attention(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006331
Yaniv Rosner6bbca912008-08-13 15:57:28 -07006332 if (!BP_NOMCP(bp)) {
6333 bnx2x_acquire_phy_lock(bp);
6334 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6335 bnx2x_release_phy_lock(bp);
6336 } else
6337 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6338
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006339 return 0;
6340}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006341
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006342static int bnx2x_init_port(struct bnx2x *bp)
6343{
6344 int port = BP_PORT(bp);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006345 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
Eilon Greenstein1c063282009-02-12 08:36:43 +00006346 u32 low, high;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006347 u32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006348
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006349 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6350
6351 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006352
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006353 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006354 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07006355
6356 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6357 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6358 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006359 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006360
Michael Chan37b091b2009-10-10 13:46:55 +00006361#ifdef BCM_CNIC
6362 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006363
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006364 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
Michael Chan37b091b2009-10-10 13:46:55 +00006365 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6366 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006367#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006368 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00006369
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006370 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00006371 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6372 /* no pause for emulation and FPGA */
6373 low = 0;
6374 high = 513;
6375 } else {
6376 if (IS_E1HMF(bp))
6377 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6378 else if (bp->dev->mtu > 4096) {
6379 if (bp->flags & ONE_PORT_FLAG)
6380 low = 160;
6381 else {
6382 val = bp->dev->mtu;
6383 /* (24*1024 + val*4)/256 */
6384 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6385 }
6386 } else
6387 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6388 high = low + 56; /* 14*1024/256 */
6389 }
6390 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6391 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6392
6393
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006394 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07006395
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006396 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006397 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006398 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006399 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006400
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006401 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6402 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6403 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6404 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006405
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006406 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006407 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006408
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006409 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006410
6411 /* configure PBF to work without PAUSE mtu 9000 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006412 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006413
6414 /* update threshold */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006415 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006416 /* update init credit */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006417 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006418
6419 /* probe changes */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006420 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006421 msleep(5);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006422 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006423
Michael Chan37b091b2009-10-10 13:46:55 +00006424#ifdef BCM_CNIC
6425 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006426#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006427 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006428 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006429
6430 if (CHIP_IS_E1(bp)) {
6431 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6432 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6433 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006434 bnx2x_init_block(bp, HC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006435
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006436 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006437 /* init aeu_mask_attn_func_0/1:
6438 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6439 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6440 * bits 4-7 are used for "per vn group attention" */
6441 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6442 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6443
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006444 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006445 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006446 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006447 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006448 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006449
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006450 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006451
6452 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6453
6454 if (CHIP_IS_E1H(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006455 /* 0x2 disable e1hov, 0x1 enable */
6456 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6457 (IS_E1HMF(bp) ? 0x1 : 0x2));
6458
Eilon Greenstein1c063282009-02-12 08:36:43 +00006459 {
6460 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6461 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6462 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6463 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006464 }
6465
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006466 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006467 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006468
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006469 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
Eilon Greenstein589abe32009-02-12 08:36:55 +00006470 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6471 {
6472 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6473
6474 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6475 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6476
6477 /* The GPIO should be swapped if the swap register is
6478 set and active */
6479 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6480 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6481
6482 /* Select function upon port-swap configuration */
6483 if (port == 0) {
6484 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6485 aeu_gpio_mask = (swap_val && swap_override) ?
6486 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6487 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6488 } else {
6489 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6490 aeu_gpio_mask = (swap_val && swap_override) ?
6491 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6492 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6493 }
6494 val = REG_RD(bp, offset);
6495 /* add GPIO3 to group */
6496 val |= aeu_gpio_mask;
6497 REG_WR(bp, offset, val);
6498 }
6499 break;
6500
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006501 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006502 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eliezer Tamirf1410642008-02-28 11:51:50 -08006503 /* add SPIO 5 to group 0 */
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006504 {
6505 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6506 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6507 val = REG_RD(bp, reg_addr);
Eliezer Tamirf1410642008-02-28 11:51:50 -08006508 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006509 REG_WR(bp, reg_addr, val);
6510 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08006511 break;
6512
6513 default:
6514 break;
6515 }
6516
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006517 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006518
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006519 return 0;
6520}
6521
6522#define ILT_PER_FUNC (768/2)
6523#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has a
   1=valid bit added in the 53rd bit position;
   then, since this is a wide register(TM),
   we split it into two 32 bit writes
 */
6529#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6530#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6531#define PXP_ONE_ILT(x) (((x) << 10) | x)
6532#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6533
Michael Chan37b091b2009-10-10 13:46:55 +00006534#ifdef BCM_CNIC
6535#define CNIC_ILT_LINES 127
6536#define CNIC_CTX_PER_ILT 16
6537#else
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006538#define CNIC_ILT_LINES 0
Michael Chan37b091b2009-10-10 13:46:55 +00006539#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006540
6541static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6542{
6543 int reg;
6544
6545 if (CHIP_IS_E1H(bp))
6546 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6547 else /* E1 */
6548 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6549
6550 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6551}
6552
/*
 * bnx2x_init_func - per-function stage of the HW initialization.
 *
 * Runs after the COMMON and PORT stages.  Programs this function's
 * ILT window (context line, and - when CNIC is built in - timers, QM
 * and searcher lines), runs the per-function init blocks on E1H,
 * initializes the host coalescing (HC) block and clears latched PCIE
 * error status registers.
 *
 * Always returns 0; the int return keeps the signature symmetric with
 * the other init stages dispatched from bnx2x_init_hw().
 */
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	/* first ILT line owned by this function */
	i = FUNC_ILT_BASE(func);

	/* CDU (context) lines: i .. i + CNIC_ILT_LINES */
	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	/* one ILT line each for the timers block, QM and searcher (SRC) */
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	/* last free element sits 64 bytes before the end of the 16K T2 */
	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		/* per-function init blocks exist only on E1H */
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
6649
/*
 * bnx2x_init_hw - top-level HW init dispatcher.
 *
 * @load_code: MCP response telling this driver instance which init
 *             stages it is responsible for.
 *
 * The switch intentionally falls through: the first function loaded
 * on the chip runs COMMON + PORT + FUNC, the first on a port runs
 * PORT + FUNC, any other function runs only FUNC.
 *
 * Returns 0 on success or the error from a failed init stage; the
 * gunzip scratch buffer is released on every exit path.
 */
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	/* DMAE is not usable until the COMMON stage has run */
	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		/* read the current driver pulse sequence from shared mem */
		bp->fw_drv_pulse_wr_seq =
		       (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
			DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	/* one extra status block for CNIC (i holds the next index here) */
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
6711
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006712static void bnx2x_free_mem(struct bnx2x *bp)
6713{
6714
6715#define BNX2X_PCI_FREE(x, y, size) \
6716 do { \
6717 if (x) { \
6718 pci_free_consistent(bp->pdev, size, x, y); \
6719 x = NULL; \
6720 y = 0; \
6721 } \
6722 } while (0)
6723
6724#define BNX2X_FREE(x) \
6725 do { \
6726 if (x) { \
6727 vfree(x); \
6728 x = NULL; \
6729 } \
6730 } while (0)
6731
6732 int i;
6733
6734 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006735 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006736 for_each_queue(bp, i) {
6737
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006738 /* status blocks */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006739 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6740 bnx2x_fp(bp, i, status_blk_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006741 sizeof(struct host_status_block));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006742 }
6743 /* Rx */
6744 for_each_rx_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006745
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006746 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006747 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6748 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6749 bnx2x_fp(bp, i, rx_desc_mapping),
6750 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6751
6752 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6753 bnx2x_fp(bp, i, rx_comp_mapping),
6754 sizeof(struct eth_fast_path_rx_cqe) *
6755 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006756
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006757 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07006758 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006759 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6760 bnx2x_fp(bp, i, rx_sge_mapping),
6761 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6762 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006763 /* Tx */
6764 for_each_tx_queue(bp, i) {
6765
6766 /* fastpath tx rings: tx_buf tx_desc */
6767 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6768 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6769 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006770 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006771 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006772 /* end of fastpath */
6773
6774 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006775 sizeof(struct host_def_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006776
6777 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006778 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006779
Michael Chan37b091b2009-10-10 13:46:55 +00006780#ifdef BCM_CNIC
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006781 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6782 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6783 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6784 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
Michael Chan37b091b2009-10-10 13:46:55 +00006785 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6786 sizeof(struct host_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006787#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006788 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006789
6790#undef BNX2X_PCI_FREE
6791#undef BNX2X_KFREE
6792}
6793
/*
 * bnx2x_alloc_mem - allocate all driver memory: per-queue status
 * blocks and rings, the default status block, slowpath buffer, CNIC
 * tables (when built in) and the slowpath (SP) ring.
 *
 * Both helper macros zero the allocation and jump to alloc_mem_err on
 * failure (goto-cleanup), where everything already allocated is
 * released via bnx2x_free_mem().
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.
 */
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

/* DMA-coherent, zeroed allocation; bails out to alloc_mem_err */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* vmalloc'ed, zeroed allocation; bails out to alloc_mem_err */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		/* back-pointer from the fastpath struct to the driver */
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	  (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	/* each 64-byte element's last u64 points at the next element */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6899
6900static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6901{
6902 int i;
6903
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006904 for_each_tx_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006905 struct bnx2x_fastpath *fp = &bp->fp[i];
6906
6907 u16 bd_cons = fp->tx_bd_cons;
6908 u16 sw_prod = fp->tx_pkt_prod;
6909 u16 sw_cons = fp->tx_pkt_cons;
6910
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006911 while (sw_cons != sw_prod) {
6912 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6913 sw_cons++;
6914 }
6915 }
6916}
6917
6918static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6919{
6920 int i, j;
6921
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006922 for_each_rx_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006923 struct bnx2x_fastpath *fp = &bp->fp[j];
6924
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006925 for (i = 0; i < NUM_RX_BD; i++) {
6926 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6927 struct sk_buff *skb = rx_buf->skb;
6928
6929 if (skb == NULL)
6930 continue;
6931
6932 pci_unmap_single(bp->pdev,
6933 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein356e2382009-02-12 08:38:32 +00006934 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006935
6936 rx_buf->skb = NULL;
6937 dev_kfree_skb(skb);
6938 }
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006939 if (!fp->disable_tpa)
Eilon Greenstein32626232008-08-13 15:51:07 -07006940 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6941 ETH_MAX_AGGREGATION_QUEUES_E1 :
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006942 ETH_MAX_AGGREGATION_QUEUES_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006943 }
6944}
6945
/* Release every SKB the driver still owns: Tx rings first, then Rx. */
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
6951
6952static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6953{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006954 int i, offset = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006955
6956 free_irq(bp->msix_table[0].vector, bp->dev);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006957 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006958 bp->msix_table[0].vector);
6959
Michael Chan37b091b2009-10-10 13:46:55 +00006960#ifdef BCM_CNIC
6961 offset++;
6962#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006963 for_each_queue(bp, i) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006964 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006965 "state %x\n", i, bp->msix_table[i + offset].vector,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006966 bnx2x_fp(bp, i, state));
6967
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006968 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006969 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006970}
6971
6972static void bnx2x_free_irq(struct bnx2x *bp)
6973{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006974 if (bp->flags & USING_MSIX_FLAG) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006975 bnx2x_free_msix_irqs(bp);
6976 pci_disable_msix(bp->pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006977 bp->flags &= ~USING_MSIX_FLAG;
6978
Eilon Greenstein8badd272009-02-12 08:36:15 +00006979 } else if (bp->flags & USING_MSI_FLAG) {
6980 free_irq(bp->pdev->irq, bp->dev);
6981 pci_disable_msi(bp->pdev);
6982 bp->flags &= ~USING_MSI_FLAG;
6983
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006984 } else
6985 free_irq(bp->pdev->irq, bp->dev);
6986}
6987
/*
 * bnx2x_enable_msix - build the MSI-X vector table and enable MSI-X.
 *
 * Entry 0 is the slowpath vector; with CNIC built in an extra vector
 * follows it; the remaining entries map one vector per fastpath
 * queue, with IGU vector ids based at BP_L_ID().
 *
 * Sets USING_MSIX_FLAG and returns 0 on success; otherwise returns
 * the pci_enable_msix() error code and leaves the flag clear.
 */
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	/* slowpath vector */
	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
7020
/*
 * bnx2x_req_msix_irqs - request the slowpath and per-queue MSI-X IRQs.
 *
 * Names each fastpath IRQ "<dev>-rx-<n>" or "<dev>-tx-<n>" depending
 * on whether the queue index falls in the Rx or Tx range.  On any
 * failure all already-requested fastpath vectors are released via
 * bnx2x_free_msix_irqs() and -EBUSY is returned; 0 on success.
 */
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	/* slowpath IRQ (vector 0) */
	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;	/* CNIC owns the vector after the slowpath one */
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (i < bp->num_rx_queues)
			sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
		else
			sprintf(fp->name, "%s-tx-%d",
				bp->dev->name, i - bp->num_rx_queues);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->dev->name, bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
7064
Eilon Greenstein8badd272009-02-12 08:36:15 +00007065static int bnx2x_enable_msi(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007066{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007067 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007068
Eilon Greenstein8badd272009-02-12 08:36:15 +00007069 rc = pci_enable_msi(bp->pdev);
7070 if (rc) {
7071 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7072 return -1;
7073 }
7074 bp->flags |= USING_MSI_FLAG;
7075
7076 return 0;
7077}
7078
7079static int bnx2x_req_irq(struct bnx2x *bp)
7080{
7081 unsigned long flags;
7082 int rc;
7083
7084 if (bp->flags & USING_MSI_FLAG)
7085 flags = 0;
7086 else
7087 flags = IRQF_SHARED;
7088
7089 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007090 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007091 if (!rc)
7092 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7093
7094 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007095}
7096
Yitchak Gertner65abd742008-08-25 15:26:24 -07007097static void bnx2x_napi_enable(struct bnx2x *bp)
7098{
7099 int i;
7100
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007101 for_each_rx_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007102 napi_enable(&bnx2x_fp(bp, i, napi));
7103}
7104
7105static void bnx2x_napi_disable(struct bnx2x *bp)
7106{
7107 int i;
7108
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007109 for_each_rx_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007110 napi_disable(&bnx2x_fp(bp, i, napi));
7111}
7112
/*
 * bnx2x_netif_start - re-enable interrupt handling, NAPI and Tx.
 *
 * Decrements the interrupt disable nesting counter; only when it
 * reaches zero (atomic_dec_and_test() returns true) and the netdev
 * is running are NAPI and interrupts re-enabled and the Tx queues
 * woken.
 */
static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			/* wake Tx queues only once fully open */
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}
7129
/*
 * bnx2x_netif_stop - quiesce the interface: synchronously disable
 * interrupts, stop NAPI polling and disable Tx.
 *
 * @disable_hw: passed through to bnx2x_int_disable_sync(); when set,
 *              interrupts are also masked at the device level.
 */
static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies; /* prevent tx timeout */
}
7137
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007138/*
7139 * Init service functions
7140 */
7141
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 *
 * Builds a mac_configuration_cmd in the slowpath buffer and posts a
 * SET_MAC ramrod; completion is handled asynchronously by the
 * slowpath event path.
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	/* one entry for the MAC itself, optionally one more for bcast */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC; each 16-bit word is byte-swapped for the CAM */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		/* clearing: mark the CAM entry invalid instead */
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	/* post the slowpath ramrod that programs the CAM */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
7213
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * Fills the single-entry E1H MAC configuration command in the slowpath
 * buffer and posts a SET_MAC ramrod.  Completion is not awaited here;
 * callers track it via bp->set_mac_pending (see the wrappers below).
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC: bytes are swabbed into the 16-bit CAM fields */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		/* when setting, flags carry the port number */
		config->config_table[0].flags = BP_PORT(bp);
	else
		/* when clearing, mark the entry for invalidation */
		config->config_table[0].flags =
			MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	/* post the SET_MAC ramrod with the DMA address of the command */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
7261
/*
 * Wait (or actively poll) until *state_p reaches the expected 'state'.
 *
 * *state_p is advanced asynchronously by slowpath completion handling
 * (bnx2x_sp_event()); in polling mode Rx completions are consumed here
 * directly instead of relying on interrupts.
 *
 * Returns 0 on success, -EIO if the driver paniced while waiting, or
 * -EBUSY after ~5000 iterations of 1 ms sleeps (timeout).
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		/* bail out early if the driver paniced meanwhile */
		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
7306
/*
 * Set (or clear) the primary Ethernet MAC for an E1H chip and wait for
 * the ramrod to complete.  bp->set_mac_pending is raised before posting
 * and the wait loop watches for it to drop back to 0; smp_wmb() orders
 * the counter update before the ramrod is posted.
 */
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	/* CAM offset is the function number for E1H unicasts */
	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion (poll only when clearing) */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}
7318
/*
 * Set (or clear) the primary Ethernet MAC for an E1 chip, including the
 * broadcast CAM entry (last argument = 1), and wait for the ramrod to
 * complete via bp->set_mac_pending (same protocol as the E1H variant).
 */
static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	/* E1 CAM layout puts port 1 unicasts at offset 32 (see the CAM
	 * allocation comment in bnx2x_set_mac_addr_e1_gen) */
	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion (poll only when clearing) */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}
7331
#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		/* E1: iSCSI entry lives 2 slots past the port's unicasts */
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
				  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
				  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
				   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif
7369
/*
 * Bring up the leading (default) connection: re-arm the IGU for status
 * block 0, post the PORT_SETUP ramrod and wait for bp->state to reach
 * BNX2X_STATE_OPEN.  Returns the bnx2x_wait_ramrod() result.
 */
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}
7385
/*
 * Bring up a non-default (multi-queue) connection 'index': re-arm its
 * IGU status block, post a CLIENT_SETUP ramrod and wait for the
 * fastpath state to move from OPENING to OPEN.
 */
static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}
7402
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007403static int bnx2x_poll(struct napi_struct *napi, int budget);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007404
Eilon Greensteinca003922009-08-12 22:53:28 -07007405static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7406 int *num_tx_queues_out)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007407{
Eilon Greensteinca003922009-08-12 22:53:28 -07007408 int _num_rx_queues = 0, _num_tx_queues = 0;
7409
7410 switch (bp->multi_mode) {
7411 case ETH_RSS_MODE_DISABLED:
7412 _num_rx_queues = 1;
7413 _num_tx_queues = 1;
7414 break;
7415
7416 case ETH_RSS_MODE_REGULAR:
7417 if (num_rx_queues)
7418 _num_rx_queues = min_t(u32, num_rx_queues,
7419 BNX2X_MAX_QUEUES(bp));
7420 else
7421 _num_rx_queues = min_t(u32, num_online_cpus(),
7422 BNX2X_MAX_QUEUES(bp));
7423
7424 if (num_tx_queues)
7425 _num_tx_queues = min_t(u32, num_tx_queues,
7426 BNX2X_MAX_QUEUES(bp));
7427 else
7428 _num_tx_queues = min_t(u32, num_online_cpus(),
7429 BNX2X_MAX_QUEUES(bp));
7430
7431 /* There must be not more Tx queues than Rx queues */
7432 if (_num_tx_queues > _num_rx_queues) {
7433 BNX2X_ERR("number of tx queues (%d) > "
7434 "number of rx queues (%d)"
7435 " defaulting to %d\n",
7436 _num_tx_queues, _num_rx_queues,
7437 _num_rx_queues);
7438 _num_tx_queues = _num_rx_queues;
7439 }
7440 break;
7441
7442
7443 default:
7444 _num_rx_queues = 1;
7445 _num_tx_queues = 1;
7446 break;
7447 }
7448
7449 *num_rx_queues_out = _num_rx_queues;
7450 *num_tx_queues_out = _num_tx_queues;
7451}
7452
/*
 * Choose the interrupt mode and queue counts according to the
 * "int_mode" module parameter.  For MSI-X (the default) the queue
 * counts are sized by bnx2x_set_int_mode_msix() and MSI-X vectors are
 * requested; on failure the counts collapse to 1/1 so the caller
 * (bnx2x_nic_load()) can fall back to MSI or legacy INTx.
 *
 * Returns the bnx2x_enable_msix() result (0 in the INTx/MSI cases);
 * the caller inspects this rc to decide the fallback path.
 */
static int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_rx_queues = 1;
		bp->num_tx_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set interrupt mode according to bp->multi_mode value */
		bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
					&bp->num_tx_queues);

		DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X (rx %d tx %d), "
					  "set number of queues to 1\n",
					  bp->num_rx_queues, bp->num_tx_queues);
			bp->num_rx_queues = 1;
			bp->num_tx_queues = 1;
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
	return rc;
}
7494
Michael Chan993ac7b2009-10-10 13:46:56 +00007495#ifdef BCM_CNIC
7496static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7497static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7498#endif
Eilon Greenstein8badd272009-02-12 08:36:15 +00007499
/*
 * Bring the NIC up.  Must be called with rtnl_lock held.
 *
 * Sequence: pick interrupt mode and queue counts, allocate memory and
 * NAPI contexts, request IRQs, perform the LOAD_REQ/LOAD_DONE handshake
 * with the MCP (or emulate it via load_count[] when there is no MCP),
 * initialize the HW and internals, bring up the leading and non-default
 * connections, program the MAC(s) and finally start the fast path
 * according to 'load_mode'.  Errors unwind through the load_error*
 * labels in reverse order of acquisition.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* rc carries the MSI-X enable result; it is consulted below when
	 * deciding whether to fall back to MSI or legacy INTx */
	rc = bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
			((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_int_mode()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		/* no MCP: emulate the handshake with local load counters */
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	/* first function on a port becomes the port management function */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* advertise DCC support to the MCP via shmem2, if present */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif

	return 0;

	/* error unwinding: each label releases what was acquired before
	 * the corresponding failure point, in reverse order */
#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
7743
/*
 * Tear down a non-default connection 'index': post a HALT ramrod, wait
 * for the fastpath to reach HALTED, then post a CFC_DEL ramrod and wait
 * for CLOSED.  Returns 0 on success or the wait-ramrod error.
 */
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}
7767
/*
 * Tear down the leading connection: HALT it, then post a PORT_DEL
 * ramrod and spin (up to ~500 ms) on the default status block's
 * slowpath producer for its completion.  The chip is reset afterwards
 * anyway, so a timeout here is logged but not fatal outside of
 * BNX2X_STOP_ON_ERROR builds.
 */
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	/* snapshot the producer so we can detect the PORT_DEL completion */
	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
7817
/*
 * Per-function reset: mask the function's IGU edges, stop the CNIC
 * timer scan (waiting for it to drain) and clear this function's ILT
 * entries.
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
7846
/*
 * Per-port reset: mask the port's NIG interrupts, stop packet flow
 * into the BRB, clear the AEU attention mask, then verify the BRB has
 * drained (a non-empty BRB is only logged, not treated as fatal).
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
7872
/*
 * Dispatch the reset according to the MCP's unload response: a COMMON
 * unload resets port, function and common blocks; a PORT unload resets
 * port and function; a FUNCTION unload resets only the function.
 */
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
7899
/* must be called with rtnl_lock */
/* Full NIC teardown: stop RX, release IRQs, drain the TX fastpath queues,
 * invalidate our MAC CAM entries (optionally programming WoL match entries),
 * close all client connections via ramrods, negotiate the reset scope with
 * the MCP (or bookkeep load counts when no MCP), reset the chip and free
 * all driver resources.  Returns 0 on success, -EBUSY only under
 * BNX2X_STOP_ON_ERROR when a queue or the leading connection fails to stop.
 */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

#ifdef BCM_CNIC
	/* Tell the CNIC (iSCSI offload) driver to stop before we tear down */
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	/* Keep the MCP pulse alive manually now that the timer is gone */
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Poll-reclaim completions; ~1000 x 1ms before giving up */
		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		/* E1: clear unicast MAC, then invalidate the whole multicast
		 * CAM table via a SET_MAC ramrod */
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		/* Publish the pending-count before posting the ramrod */
		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		/* Disable this function in the LLH and clear MC hash */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	/* Pick the WoL-dependent unload request to send to the MCP */
	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		/* Program the EMAC MAC-match registers for wake-up */
		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	/* With an MCP the FW decides the reset scope; without one we track
	 * load counts ourselves (common / per-port / per-function) */
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
8083
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008084static void bnx2x_reset_task(struct work_struct *work)
8085{
8086 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8087
8088#ifdef BNX2X_STOP_ON_ERROR
8089 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8090 " so reset not done to allow debug dump,\n"
Joe Perchesad361c92009-07-06 13:05:40 -07008091 " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008092 return;
8093#endif
8094
8095 rtnl_lock();
8096
8097 if (!netif_running(bp->dev))
8098 goto reset_task_exit;
8099
8100 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8101 bnx2x_nic_load(bp, LOAD_NORMAL);
8102
8103reset_task_exit:
8104 rtnl_unlock();
8105}
8106
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008107/* end of nic load/unload */
8108
8109/* ethtool_ops */
8110
8111/*
8112 * Init service functions
8113 */
8114
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00008115static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8116{
8117 switch (func) {
8118 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8119 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8120 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8121 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8122 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8123 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8124 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8125 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8126 default:
8127 BNX2X_ERR("Unsupported function index: %d\n", func);
8128 return (u32)(-1);
8129 }
8130}
8131
/* On E1H, disable interrupts while "pretending" to be function 0: write the
 * PGL pretend register, flush the GRC transaction, disable interrupts, then
 * restore the original function identity.  Each read-back is verified with
 * BUG() since a failed pretend would make the int-disable hit the wrong
 * function.  Used only on the UNDI unload path.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
8164
/* Disable interrupts for the UNDI unload path; E1H needs the function-0
 * pretend dance, older chips can disable directly.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp))
		bnx2x_int_disable(bp);
	else
		bnx2x_undi_int_disable_e1h(bp, func);
}
8172
/* Detect a pre-boot UNDI driver left active by the BIOS/PXE and cleanly
 * take the device over: unload UNDI on both ports via the MCP, disable its
 * interrupts, block and drain input traffic, hard-reset the chip (while
 * preserving the NIG port-swap straps), and finally restore our own
 * function id and FW sequence number.  Runs under HW_LOCK_RESOURCE_UNDI.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
			      NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
			      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
8271
/* Probe chip-wide (port-independent) HW info at probe time: chip id and
 * revision, single/dual-port strap, flash size, shared-memory bases,
 * bootcode version, LED/feature config and WoL capability.  Sets
 * NO_MCP_FLAG and returns early when the shared-memory base indicates no
 * management firmware is running.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* Detect single-port bond options (register 0x2874 strap bits;
	 * presumably a bond-option register — meaning taken from HW docs) */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	/* A shmem base outside [0xA0000, 0xC0000) means no MCP firmware */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	/* WoL is only reported/usable on virtual function 0 */
	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	/* Board part number: four 32-bit words from shared HW config */
	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
8370
/* Build bp->port.supported (ethtool SUPPORTED_* capability mask) from the
 * NVRAM switch configuration and the external PHY type, read the PHY MDIO
 * address from the NIG, then prune the mask against the NVRAM
 * speed_cap_mask.  Returns early (leaving defaults) on any NVRAM config
 * error.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		/* 1G path: SerDes external PHY */
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* SerDes PHY MDIO address from the NIG straps */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		/* 10G path: XGXS with one of several external PHYs, each
		 * with its own capability set */
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			/* PHY marked failed in NVRAM: report but keep going
			 * with an empty capability set */
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* XGXS PHY MDIO address from the NIG straps */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
8607
/* Translate the NVRAM-requested link speed (bp->port.link_config) into
 * link_params.req_line_speed/req_duplex and the ethtool advertising mask,
 * validating each request against the port's supported-speeds mask
 * (bp->port.supported, filled in by bnx2x_link_settings_supported()).
 * On an inconsistent NVRAM configuration the function logs an error and
 * returns early, leaving the defaults set so far in place.
 */
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	/* full duplex unless a half-duplex speed is explicitly requested */
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			/* BCM8705/8706 are 10G-only PHYs without autoneg */
			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	/* all three 10G media variants map to the same request */
	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		/* unknown NVRAM speed: fall back to autoneg over everything
		 * the port supports rather than failing the probe
		 */
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	/* AUTO flow control needs autoneg; without it, disable flow control */
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
8770
Michael Chane665bfd2009-10-10 13:46:54 +00008771static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8772{
8773 mac_hi = cpu_to_be16(mac_hi);
8774 mac_lo = cpu_to_be32(mac_lo);
8775 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8776 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8777}
8778
/* Read the per-port hardware configuration from shared memory (NVRAM
 * shadow): lane config, external PHY config, speed capabilities, link
 * config, XGXS equalization values, WoL default and the port MAC
 * address(es).  Must run before the link is brought up; it also derives
 * the supported/requested link settings and the MDIO PHY address.
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		/* treat the NOC variant as a plain 8727, but remember the
		 * difference via a feature flag
		 */
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	/* each 32-bit shmem word packs two 16-bit per-lane values */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	/* switch_cfg must be set before the supported/requested settings
	 * are derived from it below
	 */
	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	/* primary MAC address comes from the port hw config */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	/* separate MAC for the iSCSI offload path */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008870
/* Gather all hardware configuration for this function: common (chip)
 * info, E1H multi-function mode / outer-VLAN tag, per-port settings and
 * the MAC address.  Returns 0 on success or -EPERM when the E1H
 * multi-function configuration in shared memory is inconsistent.
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		/* function 0's E1HOV tag decides whether the whole chip is
		 * in multi-function mode
		 */
		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			/* in MF mode every function must carry a valid
			 * outer-VLAN tag of its own
			 */
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			/* single-function mode is only valid on VN 0 */
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! VN %d in single function mode,"
					  " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		/* resume the driver<->MCP mailbox sequence where the
		 * firmware left it
		 */
		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		/* in MF mode the per-function MAC overrides the port MAC
		 * (when the mf_cfg entry holds a real, non-default address)
		 */
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
8952
/* One-time driver-state initialization at probe time: locks, work items,
 * hardware info, module-parameter derived settings (multi-queue, TPA,
 * flow control, ring sizes, coalescing) and the periodic timer.
 * Returns the status of bnx2x_get_hwinfo().
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	/* RSS multi-queue requires MSI-X; fall back to a single queue when
	 * the requested interrupt mode is INTx or MSI
	 */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	/* dropless flow control is not available on E1 */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* default interrupt coalescing values (usec) */
	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	/* slow (emulation/FPGA) chips get a 5x longer timer period; the
	 * "poll" module parameter overrides either
	 */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
9029
9030/*
9031 * ethtool service functions
9032 */
9033
9034/* All ethtool functions called with rtnl_lock */
9035
9036static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9037{
9038 struct bnx2x *bp = netdev_priv(dev);
9039
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009040 cmd->supported = bp->port.supported;
9041 cmd->advertising = bp->port.advertising;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009042
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07009043 if ((bp->state == BNX2X_STATE_OPEN) &&
9044 !(bp->flags & MF_FUNC_DIS) &&
9045 (bp->link_vars.link_up)) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009046 cmd->speed = bp->link_vars.line_speed;
9047 cmd->duplex = bp->link_vars.duplex;
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07009048 if (IS_E1HMF(bp)) {
9049 u16 vn_max_rate;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009050
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07009051 vn_max_rate =
9052 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009053 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07009054 if (vn_max_rate < cmd->speed)
9055 cmd->speed = vn_max_rate;
9056 }
9057 } else {
9058 cmd->speed = -1;
9059 cmd->duplex = -1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009060 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009061
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009062 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9063 u32 ext_phy_type =
9064 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
Eliezer Tamirf1410642008-02-28 11:51:50 -08009065
9066 switch (ext_phy_type) {
9067 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009068 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009069 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
Eilon Greenstein589abe32009-02-12 08:36:55 +00009070 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9071 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9072 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00009073 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009074 cmd->port = PORT_FIBRE;
9075 break;
9076
9077 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
Eilon Greenstein28577182009-02-12 08:37:00 +00009078 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009079 cmd->port = PORT_TP;
9080 break;
9081
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009082 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9083 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9084 bp->link_params.ext_phy_config);
9085 break;
9086
Eliezer Tamirf1410642008-02-28 11:51:50 -08009087 default:
9088 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009089 bp->link_params.ext_phy_config);
9090 break;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009091 }
9092 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009093 cmd->port = PORT_TP;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009094
Eilon Greenstein01cd4522009-08-12 08:23:08 +00009095 cmd->phy_address = bp->mdio.prtad;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009096 cmd->transceiver = XCVR_INTERNAL;
9097
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009098 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009099 cmd->autoneg = AUTONEG_ENABLE;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009100 else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009101 cmd->autoneg = AUTONEG_DISABLE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009102
9103 cmd->maxtxpkt = 0;
9104 cmd->maxrxpkt = 0;
9105
9106 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9107 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9108 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9109 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9110 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9111 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9112 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9113
9114 return 0;
9115}
9116
/* ethtool set_settings handler: apply a new link configuration.
 * Validates the requested autoneg/speed/duplex against the port's
 * supported mask, updates link_params and the advertising mask, and
 * restarts the link when the interface is up.  Returns 0 on success or
 * -EINVAL for an unsupported combination.  A no-op (returns 0) in
 * multi-function mode, where link settings are owned by the PMF.
 */
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
						SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
						SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		/* 1G and above are full-duplex only */
		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		/* commit only after the request validated successfully */
		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	/* apply immediately if the interface is up */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
9267
/* True iff a register-dump table entry is valid ("online") for the chip */
#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

/* ethtool get_regs_len: size in bytes of the register dump for this chip
 * (E1 or E1H), including the dump_hdr that bnx2x_get_regs() prepends.
 */
static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;	/* accumulated in 32-bit words, scaled below */
	int i;

	if (CHIP_IS_E1(bp)) {
		/* plain registers valid on E1 */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		/* wide-bus registers: each entry yields the register itself
		 * plus read_regs_count follow-up reads */
		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;	/* words -> bytes */
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
9302
/* ethtool get_regs: fill the user buffer with a dump_hdr followed by the raw
 * values of every register marked online for this chip.  If the interface is
 * down the buffer is left all-zero (it is memset first, then we bail out).
 */
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	/* hdr_size is expressed in dwords, minus one per the dump format */
	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	/* snapshot the storm processors' wait pointers */
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	/* NOTE(review): only reg_addrs[] is dumped here although
	 * bnx2x_get_regs_len() also budgets space for the wreg tables, so
	 * the buffer is larger than what is written - confirm intentional. */
	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
9342
/* Buffer size for the external PHY firmware version string */
#define PHY_FW_VER_LEN			10

/* ethtool get_drvinfo: report driver name/version, bootcode (and, on the
 * PMF, external PHY) firmware versions, PCI bus id and dump lengths.
 */
static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	/* only the PMF owns the PHY, so only it may query the PHY FW */
	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	/* bootcode version is packed as three bytes of bc_ver */
	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}
9374
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009375static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9376{
9377 struct bnx2x *bp = netdev_priv(dev);
9378
9379 if (bp->flags & NO_WOL_FLAG) {
9380 wol->supported = 0;
9381 wol->wolopts = 0;
9382 } else {
9383 wol->supported = WAKE_MAGIC;
9384 if (bp->wol)
9385 wol->wolopts = WAKE_MAGIC;
9386 else
9387 wol->wolopts = 0;
9388 }
9389 memset(&wol->sopass, 0, sizeof(wol->sopass));
9390}
9391
9392static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9393{
9394 struct bnx2x *bp = netdev_priv(dev);
9395
9396 if (wol->wolopts & ~WAKE_MAGIC)
9397 return -EINVAL;
9398
9399 if (wol->wolopts & WAKE_MAGIC) {
9400 if (bp->flags & NO_WOL_FLAG)
9401 return -EINVAL;
9402
9403 bp->wol = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009404 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009405 bp->wol = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009406
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009407 return 0;
9408}
9409
9410static u32 bnx2x_get_msglevel(struct net_device *dev)
9411{
9412 struct bnx2x *bp = netdev_priv(dev);
9413
9414 return bp->msglevel;
9415}
9416
9417static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9418{
9419 struct bnx2x *bp = netdev_priv(dev);
9420
9421 if (capable(CAP_NET_ADMIN))
9422 bp->msglevel = level;
9423}
9424
/* ethtool nway_reset: restart link configuration.  Only the PMF (port
 * management function) controls the link, so other functions report
 * success without doing anything.
 */
static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		/* stop statistics before re-driving the link */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
9439
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009440static u32 bnx2x_get_link(struct net_device *dev)
Naohiro Ooiwa01e53292009-06-30 12:44:19 -07009441{
9442 struct bnx2x *bp = netdev_priv(dev);
9443
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07009444 if (bp->flags & MF_FUNC_DIS)
9445 return 0;
9446
Naohiro Ooiwa01e53292009-06-30 12:44:19 -07009447 return bp->link_vars.link_up;
9448}
9449
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009450static int bnx2x_get_eeprom_len(struct net_device *dev)
9451{
9452 struct bnx2x *bp = netdev_priv(dev);
9453
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009454 return bp->common.flash_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009455}
9456
/* Request the per-port NVRAM software arbitration and poll until the
 * hardware grants it.  Returns 0 on success, -EBUSY on timeout.
 */
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	/* poll for the grant bit for this port */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
9487
/* Release the per-port NVRAM software arbitration and poll until the
 * grant bit drops.  Returns 0 on success, -EBUSY on timeout.
 */
static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	/* wait until the hardware drops our grant bit */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
9518
/* Read-modify-write the NVRAM access-enable register to allow both read
 * and write accesses from this function.
 */
static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}
9530
/* Read-modify-write the NVRAM access-enable register to revoke both read
 * and write accesses (counterpart of bnx2x_enable_nvram_access()).
 */
static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
9542
/* Issue a single NVRAM dword read at offset and poll for completion.
 * cmd_flags carries the FIRST/LAST sequencing bits.  On success *ret_val
 * holds the data converted to big-endian (so callers can treat the result
 * as a byte array for ethtool).  Returns 0 or -EBUSY on timeout.
 */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
9587
/* Read buf_size bytes (must be dword-aligned and non-zero) from NVRAM at
 * offset into ret_buf.  Acquires/releases the NVRAM hardware lock around
 * the transfer and drives the FIRST/LAST command sequencing.
 * Returns 0 or a negative errno.
 */
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		/* the final dword closes the sequence with the LAST flag */
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9642
9643static int bnx2x_get_eeprom(struct net_device *dev,
9644 struct ethtool_eeprom *eeprom, u8 *eebuf)
9645{
9646 struct bnx2x *bp = netdev_priv(dev);
9647 int rc;
9648
Eilon Greenstein2add3ac2009-01-14 06:44:07 +00009649 if (!netif_running(dev))
9650 return -EAGAIN;
9651
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009652 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009653 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9654 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9655 eeprom->len, eeprom->len);
9656
9657 /* parameters already validated in ethtool_get_eeprom */
9658
9659 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9660
9661 return rc;
9662}
9663
/* Program one dword (val, cpu order) into NVRAM at offset and poll for the
 * DONE bit.  cmd_flags carries the FIRST/LAST sequencing bits.
 * Returns 0 or -EBUSY on timeout.
 */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion (val is reused as a scratch register here) */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
9703
/* Bit offset (within a dword) of the byte at 'offset' */
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

/* Write a single byte to NVRAM by read-modify-write of the dword that
 * contains it (ethtool EEPROM writes arrive one byte at a time).
 * Returns 0 or a negative errno.
 */
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* fetch the containing dword, then splice in the new byte */
	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9751
/* Write buf_size bytes from data_buf to NVRAM at offset.  A single-byte
 * write (the ethtool case) goes through the RMW path in
 * bnx2x_nvram_write1(); otherwise offset and size must be dword-aligned.
 * FIRST/LAST command flags are raised on NVRAM page boundaries.
 * Returns 0 or a negative errno.
 */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* raise LAST on the final dword or at the end of a page,
		 * FIRST again at the start of a new page */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9812
/* ethtool set_eeprom: either a plain NVRAM write, or - selected by magic
 * values in the 'PHY…' range - one step of the external PHY firmware
 * upgrade procedure (prepare / re-init / complete).
 */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		/* take the link down; for SFX7101 also raise GPIO0 */
		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed
		 * NOTE(review): 0x53985943 does not actually spell 'PHYC'
		 * ('PHYC' would be 0x50485943) - the value is kept as-is
		 * since userspace tools send this exact magic. */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
9887
9888static int bnx2x_get_coalesce(struct net_device *dev,
9889 struct ethtool_coalesce *coal)
9890{
9891 struct bnx2x *bp = netdev_priv(dev);
9892
9893 memset(coal, 0, sizeof(struct ethtool_coalesce));
9894
9895 coal->rx_coalesce_usecs = bp->rx_ticks;
9896 coal->tx_coalesce_usecs = bp->tx_ticks;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009897
9898 return 0;
9899}
9900
Eilon Greensteinca003922009-08-12 22:53:28 -07009901#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009902static int bnx2x_set_coalesce(struct net_device *dev,
9903 struct ethtool_coalesce *coal)
9904{
9905 struct bnx2x *bp = netdev_priv(dev);
9906
9907 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -07009908 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9909 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009910
9911 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -07009912 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9913 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009914
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009915 if (netif_running(dev))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009916 bnx2x_update_coalesce(bp);
9917
9918 return 0;
9919}
9920
9921static void bnx2x_get_ringparam(struct net_device *dev,
9922 struct ethtool_ringparam *ering)
9923{
9924 struct bnx2x *bp = netdev_priv(dev);
9925
9926 ering->rx_max_pending = MAX_RX_AVAIL;
9927 ering->rx_mini_max_pending = 0;
9928 ering->rx_jumbo_max_pending = 0;
9929
9930 ering->rx_pending = bp->rx_ring_size;
9931 ering->rx_mini_pending = 0;
9932 ering->rx_jumbo_pending = 0;
9933
9934 ering->tx_max_pending = MAX_TX_AVAIL;
9935 ering->tx_pending = bp->tx_ring_size;
9936}
9937
9938static int bnx2x_set_ringparam(struct net_device *dev,
9939 struct ethtool_ringparam *ering)
9940{
9941 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009942 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009943
9944 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9945 (ering->tx_pending > MAX_TX_AVAIL) ||
9946 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9947 return -EINVAL;
9948
9949 bp->rx_ring_size = ering->rx_pending;
9950 bp->tx_ring_size = ering->tx_pending;
9951
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009952 if (netif_running(dev)) {
9953 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9954 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009955 }
9956
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009957 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009958}
9959
9960static void bnx2x_get_pauseparam(struct net_device *dev,
9961 struct ethtool_pauseparam *epause)
9962{
9963 struct bnx2x *bp = netdev_priv(dev);
9964
Eilon Greenstein356e2382009-02-12 08:38:32 +00009965 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9966 BNX2X_FLOW_CTRL_AUTO) &&
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009967 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9968
David S. Millerc0700f92008-12-16 23:53:20 -08009969 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9970 BNX2X_FLOW_CTRL_RX);
9971 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9972 BNX2X_FLOW_CTRL_TX);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009973
9974 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9975 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9976 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9977}
9978
/* ethtool set_pauseparam: translate the requested Rx/Tx pause and autoneg
 * settings into link_params.req_flow_ctrl and re-drive the link.  No-op in
 * multi-function (E1H MF) mode, where link settings are not per-function.
 */
static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	/* start from AUTO, OR in the explicitly requested directions */
	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	/* neither direction requested: that means NONE, not AUTO */
	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		/* pause autoneg applies only when speed is auto too */
		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
10022
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -070010023static int bnx2x_set_flags(struct net_device *dev, u32 data)
10024{
10025 struct bnx2x *bp = netdev_priv(dev);
10026 int changed = 0;
10027 int rc = 0;
10028
10029 /* TPA requires Rx CSUM offloading */
10030 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10031 if (!(dev->features & NETIF_F_LRO)) {
10032 dev->features |= NETIF_F_LRO;
10033 bp->flags |= TPA_ENABLE_FLAG;
10034 changed = 1;
10035 }
10036
10037 } else if (dev->features & NETIF_F_LRO) {
10038 dev->features &= ~NETIF_F_LRO;
10039 bp->flags &= ~TPA_ENABLE_FLAG;
10040 changed = 1;
10041 }
10042
10043 if (changed && netif_running(dev)) {
10044 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10045 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10046 }
10047
10048 return rc;
10049}
10050
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010051static u32 bnx2x_get_rx_csum(struct net_device *dev)
10052{
10053 struct bnx2x *bp = netdev_priv(dev);
10054
10055 return bp->rx_csum;
10056}
10057
10058static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10059{
10060 struct bnx2x *bp = netdev_priv(dev);
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -070010061 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010062
10063 bp->rx_csum = data;
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -070010064
10065 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
10066 TPA'ed packets will be discarded due to wrong TCP CSUM */
10067 if (!data) {
10068 u32 flags = ethtool_op_get_flags(dev);
10069
10070 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10071 }
10072
10073 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010074}
10075
10076static int bnx2x_set_tso(struct net_device *dev, u32 data)
10077{
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010078 if (data) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010079 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010080 dev->features |= NETIF_F_TSO6;
10081 } else {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010082 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010083 dev->features &= ~NETIF_F_TSO6;
10084 }
10085
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010086 return 0;
10087}
10088
/* Names reported to ethtool for self-test results; order must match the
 * order in which the tests are executed/recorded. */
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
10100
/* Self-test: write 0x00000000 then 0xffffffff (masked) to a table of
 * per-port registers, read each back, and verify the masked value sticks.
 * Each entry is { base offset, per-port stride, writable-bits mask }.
 * Original values are restored after each probe.
 * Returns 0 on success, -ENODEV on the first mismatch or if the device
 * is down.
 */
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;	/* register address for port 0 */
		u32 offset1;	/* stride added per port number */
		u32 mask;	/* writable bits to verify */
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }	/* table terminator */
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
10193
10194static int bnx2x_test_memory(struct bnx2x *bp)
10195{
10196 int i, j, rc = -ENODEV;
10197 u32 val;
10198 static const struct {
10199 u32 offset;
10200 int size;
10201 } mem_tbl[] = {
10202 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10203 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10204 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10205 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10206 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10207 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10208 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10209
10210 { 0xffffffff, 0 }
10211 };
10212 static const struct {
10213 char *name;
10214 u32 offset;
Yitchak Gertner9dabc422008-08-13 15:51:28 -070010215 u32 e1_mask;
10216 u32 e1h_mask;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010217 } prty_tbl[] = {
Yitchak Gertner9dabc422008-08-13 15:51:28 -070010218 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10219 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10220 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10221 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10222 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10223 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010224
Yitchak Gertner9dabc422008-08-13 15:51:28 -070010225 { NULL, 0xffffffff, 0, 0 }
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010226 };
10227
10228 if (!netif_running(bp->dev))
10229 return rc;
10230
10231 /* Go through all the memories */
10232 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10233 for (j = 0; j < mem_tbl[i].size; j++)
10234 REG_RD(bp, mem_tbl[i].offset + j*4);
10235
10236 /* Check the parity status */
10237 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10238 val = REG_RD(bp, prty_tbl[i].offset);
Yitchak Gertner9dabc422008-08-13 15:51:28 -070010239 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10240 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010241 DP(NETIF_MSG_HW,
10242 "%s is 0x%x\n", prty_tbl[i].name, val);
10243 goto test_mem_exit;
10244 }
10245 }
10246
10247 rc = 0;
10248
10249test_mem_exit:
10250 return rc;
10251}
10252
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010253static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10254{
10255 int cnt = 1000;
10256
10257 if (link_up)
10258 while (bnx2x_link_test(bp) && cnt--)
10259 msleep(10);
10260}
10261
/* Run a single loopback iteration in the given mode (BNX2X_PHY_LOOPBACK
 * or BNX2X_MAC_LOOPBACK): build one self-addressed packet, post it on
 * the first TX queue by hand-crafting the BD ring entries, ring the
 * doorbell, and verify that exactly one packet arrives intact on the
 * first RX queue.
 *
 * Caller is expected to have quiesced the datapath and to hold the PHY
 * lock (see bnx2x_test_loopback).  Returns 0 on success, -EINVAL for a
 * bad/unprepared mode, -ENOMEM on skb allocation failure, -ENODEV when
 * the packet does not make the round trip.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	/* RX is taken from queue 0; TX from the first TX-only queue,
	   which lives right after the RX queues in bp->fp[] */
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		/* PHY loopback requires the link to already be configured
		   in XGXS-10 loopback mode (done at LOAD_DIAG time) */
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		/* switch the link into BMAC loopback on the fly */
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet: dst MAC = own address, zero src,
	   0x77 filler for the rest of the header, counting-byte payload */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet; snapshot both consumer indices first
	   so the round trip can be detected below */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	/* fill the start BD: DMA address, length, flags */
	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	/* make sure the BDs are in memory before the doorbell fires */
	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */
	bp->dev->trans_start = jiffies;

	/* give the chip time to loop the packet back */
	udelay(100);

	/* exactly one TX completion expected */
	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* ...and exactly one RX completion */
	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* the CQE must be a fast-path, error-free one */
	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* verify the payload came back unmodified */
	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* consume the RX BD/CQE this test used */
	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
10397
10398static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10399{
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010400 int rc = 0, res;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010401
10402 if (!netif_running(bp->dev))
10403 return BNX2X_LOOPBACK_FAILED;
10404
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070010405 bnx2x_netif_stop(bp, 1);
Eilon Greenstein3910c8a2009-01-22 06:01:32 +000010406 bnx2x_acquire_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010407
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010408 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10409 if (res) {
10410 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10411 rc |= BNX2X_PHY_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010412 }
10413
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010414 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10415 if (res) {
10416 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10417 rc |= BNX2X_MAC_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010418 }
10419
Eilon Greenstein3910c8a2009-01-22 06:01:32 +000010420 bnx2x_release_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010421 bnx2x_netif_start(bp);
10422
10423 return rc;
10424}
10425
/* Residue of a correct little-endian CRC-32 computed over data that
   includes its own trailing CRC */
#define CRC32_RESIDUAL 0xdebb20e3

/* Online NVRAM test: check the NVRAM magic number, then read each known
 * region (bootstrap, directory, manufacturing/feature/key info) and
 * verify its embedded CRC-32 by checking the well-known residual.
 *
 * Returns 0 on success, a negative errno on read failure or -ENODEV on
 * a bad magic/CRC.
 */
static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;	/* byte offset in NVRAM */
		int size;	/* region size, CRC included */
	} nvram_tbl[] = {
		{ 0, 0x14 }, /* bootstrap */
		{ 0x14, 0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450, 0xf0 }, /* feature_info */
		{ 0x640, 0x64 }, /* upgrade_key_info */
		{ 0x6a4, 0x64 },
		{ 0x708, 0x70 }, /* manuf_key_info */
		{ 0x778, 0x70 },
		{ 0, 0 }
	};
	/* large enough for the biggest region (manuf_info, 0x350 bytes) */
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	/* first word must hold the NVRAM magic */
	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	/* read each region and verify its CRC via the residual check */
	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
10484
/* Online interrupt test: post a harmless (zero-length) SET_MAC ramrod
 * on the slowpath and wait for its completion, which exercises the
 * slowpath interrupt/eq machinery end to end.
 *
 * Returns 0 when the ramrod completes within ~100ms, -ENODEV on
 * timeout or when the interface is down, or the bnx2x_sp_post() error.
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* zero-entry MAC config command: nothing is actually changed */
	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* the completion handler decrements set_mac_pending; publish the
	   increment before the ramrod is posted */
	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		/* poll up to 10 * 10ms for the completion */
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
10519
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010520static void bnx2x_self_test(struct net_device *dev,
10521 struct ethtool_test *etest, u64 *buf)
10522{
10523 struct bnx2x *bp = netdev_priv(dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010524
10525 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10526
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010527 if (!netif_running(dev))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010528 return;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010529
Eilon Greenstein33471622008-08-13 15:59:08 -070010530 /* offline tests are not supported in MF mode */
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010531 if (IS_E1HMF(bp))
10532 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10533
10534 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Eilon Greenstein279abdf2009-07-21 05:47:22 +000010535 int port = BP_PORT(bp);
10536 u32 val;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010537 u8 link_up;
10538
Eilon Greenstein279abdf2009-07-21 05:47:22 +000010539 /* save current value of input enable for TX port IF */
10540 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10541 /* disable input for TX port IF */
10542 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10543
Eilon Greenstein061bc702009-10-15 00:18:47 -070010544 link_up = (bnx2x_link_test(bp) == 0);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010545 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10546 bnx2x_nic_load(bp, LOAD_DIAG);
10547 /* wait until link state is restored */
10548 bnx2x_wait_for_link(bp, link_up);
10549
10550 if (bnx2x_test_registers(bp) != 0) {
10551 buf[0] = 1;
10552 etest->flags |= ETH_TEST_FL_FAILED;
10553 }
10554 if (bnx2x_test_memory(bp) != 0) {
10555 buf[1] = 1;
10556 etest->flags |= ETH_TEST_FL_FAILED;
10557 }
10558 buf[2] = bnx2x_test_loopback(bp, link_up);
10559 if (buf[2] != 0)
10560 etest->flags |= ETH_TEST_FL_FAILED;
10561
10562 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
Eilon Greenstein279abdf2009-07-21 05:47:22 +000010563
10564 /* restore input for TX port IF */
10565 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10566
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010567 bnx2x_nic_load(bp, LOAD_NORMAL);
10568 /* wait until link state is restored */
10569 bnx2x_wait_for_link(bp, link_up);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010570 }
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010571 if (bnx2x_test_nvram(bp) != 0) {
10572 buf[3] = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010573 etest->flags |= ETH_TEST_FL_FAILED;
10574 }
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010575 if (bnx2x_test_intr(bp) != 0) {
10576 buf[4] = 1;
10577 etest->flags |= ETH_TEST_FL_FAILED;
10578 }
10579 if (bp->port.pmf)
10580 if (bnx2x_link_test(bp) != 0) {
10581 buf[5] = 1;
10582 etest->flags |= ETH_TEST_FL_FAILED;
10583 }
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010584
10585#ifdef BNX2X_EXTRA_DEBUG
10586 bnx2x_panic_dump(bp);
10587#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010588}
10589
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010590static const struct {
10591 long offset;
10592 int size;
Eilon Greensteinde832a52009-02-12 08:36:33 +000010593 u8 string[ETH_GSTRING_LEN];
10594} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10595/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10596 { Q_STATS_OFFSET32(error_bytes_received_hi),
10597 8, "[%d]: rx_error_bytes" },
10598 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10599 8, "[%d]: rx_ucast_packets" },
10600 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10601 8, "[%d]: rx_mcast_packets" },
10602 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10603 8, "[%d]: rx_bcast_packets" },
10604 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10605 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10606 4, "[%d]: rx_phy_ip_err_discards"},
10607 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10608 4, "[%d]: rx_skb_alloc_discard" },
10609 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10610
10611/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10612 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10613 8, "[%d]: tx_packets" }
10614};
10615
/* Device-wide ethtool statistics: 'offset' is the 32-bit word offset of
 * the counter inside struct bnx2x_eth_stats (via STATS_OFFSET32),
 * 'size' is the counter width in bytes (8 = hi/lo pair, 4 = single
 * word), and 'flags' says whether the counter is maintained per port,
 * per function, or both (relevant for E1H multi-function filtering).
 */
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
10709
Eilon Greensteinde832a52009-02-12 08:36:33 +000010710#define IS_PORT_STAT(i) \
10711 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10712#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10713#define IS_E1HMF_MODE_STAT(bp) \
10714 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
Yitchak Gertner66e855f2008-08-13 15:49:05 -070010715
Ben Hutchings15f0a392009-10-01 11:58:24 +000010716static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10717{
10718 struct bnx2x *bp = netdev_priv(dev);
10719 int i, num_stats;
10720
10721 switch(stringset) {
10722 case ETH_SS_STATS:
10723 if (is_multi(bp)) {
10724 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10725 if (!IS_E1HMF_MODE_STAT(bp))
10726 num_stats += BNX2X_NUM_STATS;
10727 } else {
10728 if (IS_E1HMF_MODE_STAT(bp)) {
10729 num_stats = 0;
10730 for (i = 0; i < BNX2X_NUM_STATS; i++)
10731 if (IS_FUNC_STAT(i))
10732 num_stats++;
10733 } else
10734 num_stats = BNX2X_NUM_STATS;
10735 }
10736 return num_stats;
10737
10738 case ETH_SS_TEST:
10739 return BNX2X_NUM_TESTS;
10740
10741 default:
10742 return -EINVAL;
10743 }
10744}
10745
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010746static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10747{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010748 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +000010749 int i, j, k;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010750
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010751 switch (stringset) {
10752 case ETH_SS_STATS:
Eilon Greensteinde832a52009-02-12 08:36:33 +000010753 if (is_multi(bp)) {
10754 k = 0;
Eilon Greensteinca003922009-08-12 22:53:28 -070010755 for_each_rx_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +000010756 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10757 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10758 bnx2x_q_stats_arr[j].string, i);
10759 k += BNX2X_NUM_Q_STATS;
10760 }
10761 if (IS_E1HMF_MODE_STAT(bp))
10762 break;
10763 for (j = 0; j < BNX2X_NUM_STATS; j++)
10764 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10765 bnx2x_stats_arr[j].string);
10766 } else {
10767 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10768 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10769 continue;
10770 strcpy(buf + j*ETH_GSTRING_LEN,
10771 bnx2x_stats_arr[i].string);
10772 j++;
10773 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010774 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010775 break;
10776
10777 case ETH_SS_TEST:
10778 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10779 break;
10780 }
10781}
10782
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010783static void bnx2x_get_ethtool_stats(struct net_device *dev,
10784 struct ethtool_stats *stats, u64 *buf)
10785{
10786 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +000010787 u32 *hw_stats, *offset;
10788 int i, j, k;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010789
Eilon Greensteinde832a52009-02-12 08:36:33 +000010790 if (is_multi(bp)) {
10791 k = 0;
Eilon Greensteinca003922009-08-12 22:53:28 -070010792 for_each_rx_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +000010793 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10794 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10795 if (bnx2x_q_stats_arr[j].size == 0) {
10796 /* skip this counter */
10797 buf[k + j] = 0;
10798 continue;
10799 }
10800 offset = (hw_stats +
10801 bnx2x_q_stats_arr[j].offset);
10802 if (bnx2x_q_stats_arr[j].size == 4) {
10803 /* 4-byte counter */
10804 buf[k + j] = (u64) *offset;
10805 continue;
10806 }
10807 /* 8-byte counter */
10808 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10809 }
10810 k += BNX2X_NUM_Q_STATS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010811 }
Eilon Greensteinde832a52009-02-12 08:36:33 +000010812 if (IS_E1HMF_MODE_STAT(bp))
10813 return;
10814 hw_stats = (u32 *)&bp->eth_stats;
10815 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10816 if (bnx2x_stats_arr[j].size == 0) {
10817 /* skip this counter */
10818 buf[k + j] = 0;
10819 continue;
10820 }
10821 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10822 if (bnx2x_stats_arr[j].size == 4) {
10823 /* 4-byte counter */
10824 buf[k + j] = (u64) *offset;
10825 continue;
10826 }
10827 /* 8-byte counter */
10828 buf[k + j] = HILO_U64(*offset, *(offset + 1));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010829 }
Eilon Greensteinde832a52009-02-12 08:36:33 +000010830 } else {
10831 hw_stats = (u32 *)&bp->eth_stats;
10832 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10833 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10834 continue;
10835 if (bnx2x_stats_arr[i].size == 0) {
10836 /* skip this counter */
10837 buf[j] = 0;
10838 j++;
10839 continue;
10840 }
10841 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10842 if (bnx2x_stats_arr[i].size == 4) {
10843 /* 4-byte counter */
10844 buf[j] = (u64) *offset;
10845 j++;
10846 continue;
10847 }
10848 /* 8-byte counter */
10849 buf[j] = HILO_U64(*offset, *(offset + 1));
10850 j++;
10851 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010852 }
10853}
10854
/* ethtool .phys_id callback: blink the port LED so an operator can
 * physically identify the NIC.  Blinks for roughly @data seconds
 * (one on/off toggle every 500 ms), defaulting to 2 seconds when
 * @data is 0, then restores the LED to reflect the current link state.
 * Returns 0 (also when the device is down or this function is not the
 * port management function, in which case it silently does nothing).
 */
static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (!netif_running(dev))
		return 0;

	/* only the port management function (PMF) may drive the LED */
	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	/* data * 2 half-second intervals: even -> LED on, odd -> LED off */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
				      SPEED_1000);
		else
			bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);

		/* interruptible so a ^C on the ethtool process stops early */
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	/* restore normal LED behaviour if the link is up */
	if (bp->link_vars.link_up)
		bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
			      bp->link_vars.line_speed);

	return 0;
}
10887
Stephen Hemminger0fc0b732009-09-02 01:03:33 -070010888static const struct ethtool_ops bnx2x_ethtool_ops = {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010889 .get_settings = bnx2x_get_settings,
10890 .set_settings = bnx2x_set_settings,
10891 .get_drvinfo = bnx2x_get_drvinfo,
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000010892 .get_regs_len = bnx2x_get_regs_len,
10893 .get_regs = bnx2x_get_regs,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010894 .get_wol = bnx2x_get_wol,
10895 .set_wol = bnx2x_set_wol,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010896 .get_msglevel = bnx2x_get_msglevel,
10897 .set_msglevel = bnx2x_set_msglevel,
10898 .nway_reset = bnx2x_nway_reset,
Naohiro Ooiwa01e53292009-06-30 12:44:19 -070010899 .get_link = bnx2x_get_link,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010900 .get_eeprom_len = bnx2x_get_eeprom_len,
10901 .get_eeprom = bnx2x_get_eeprom,
10902 .set_eeprom = bnx2x_set_eeprom,
10903 .get_coalesce = bnx2x_get_coalesce,
10904 .set_coalesce = bnx2x_set_coalesce,
10905 .get_ringparam = bnx2x_get_ringparam,
10906 .set_ringparam = bnx2x_set_ringparam,
10907 .get_pauseparam = bnx2x_get_pauseparam,
10908 .set_pauseparam = bnx2x_set_pauseparam,
10909 .get_rx_csum = bnx2x_get_rx_csum,
10910 .set_rx_csum = bnx2x_set_rx_csum,
10911 .get_tx_csum = ethtool_op_get_tx_csum,
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010912 .set_tx_csum = ethtool_op_set_tx_hw_csum,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010913 .set_flags = bnx2x_set_flags,
10914 .get_flags = ethtool_op_get_flags,
10915 .get_sg = ethtool_op_get_sg,
10916 .set_sg = ethtool_op_set_sg,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010917 .get_tso = ethtool_op_get_tso,
10918 .set_tso = bnx2x_set_tso,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010919 .self_test = bnx2x_self_test,
Ben Hutchings15f0a392009-10-01 11:58:24 +000010920 .get_sset_count = bnx2x_get_sset_count,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010921 .get_strings = bnx2x_get_strings,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010922 .phys_id = bnx2x_phys_id,
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010923 .get_ethtool_stats = bnx2x_get_ethtool_stats,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010924};
10925
10926/* end of ethtool_ops */
10927
10928/****************************************************************************
10929* General service functions
10930****************************************************************************/
10931
/* Move the device between PCI power states via the PM control register.
 * @state: PCI_D0 (full power) or PCI_D3hot (low power; PME enabled when
 *         WoL is configured).  Any other state returns -EINVAL.
 * Returns 0 on success.
 * NOTE: after a successful transition to D3hot no memory-mapped access
 * may be performed until the device is brought back to D0.
 */
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* read-modify-write of the PM Control/Status register */
	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		/* clear the state bits (-> D0) and ack any pending PME */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;	/* state field value 3 == D3hot */

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
10969
Eilon Greenstein237907c2009-01-14 06:42:44 +000010970static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10971{
10972 u16 rx_cons_sb;
10973
10974 /* Tell compiler that status block fields can change */
10975 barrier();
10976 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10977 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10978 rx_cons_sb++;
10979 return (fp->rx_comp_cons != rx_cons_sb);
10980}
10981
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010982/*
10983 * net_device service functions
10984 */
10985
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010986static int bnx2x_poll(struct napi_struct *napi, int budget)
10987{
10988 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10989 napi);
10990 struct bnx2x *bp = fp->bp;
10991 int work_done = 0;
10992
10993#ifdef BNX2X_STOP_ON_ERROR
10994 if (unlikely(bp->panic))
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010995 goto poll_panic;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010996#endif
10997
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010998 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10999 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
11000
11001 bnx2x_update_fpsb_idx(fp);
11002
Eilon Greenstein8534f322009-03-02 07:59:45 +000011003 if (bnx2x_has_rx_work(fp)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011004 work_done = bnx2x_rx_int(fp, budget);
Eilon Greenstein356e2382009-02-12 08:38:32 +000011005
Eilon Greenstein8534f322009-03-02 07:59:45 +000011006 /* must not complete if we consumed full budget */
11007 if (work_done >= budget)
11008 goto poll_again;
11009 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011010
Eilon Greensteinca003922009-08-12 22:53:28 -070011011 /* bnx2x_has_rx_work() reads the status block, thus we need to
Eilon Greenstein8534f322009-03-02 07:59:45 +000011012 * ensure that status block indices have been actually read
Eilon Greensteinca003922009-08-12 22:53:28 -070011013 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
Eilon Greenstein8534f322009-03-02 07:59:45 +000011014 * so that we won't write the "newer" value of the status block to IGU
Eilon Greensteinca003922009-08-12 22:53:28 -070011015 * (if there was a DMA right after bnx2x_has_rx_work and
Eilon Greenstein8534f322009-03-02 07:59:45 +000011016 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
11017 * may be postponed to right before bnx2x_ack_sb). In this case
11018 * there will never be another interrupt until there is another update
11019 * of the status block, while there is still unhandled work.
11020 */
11021 rmb();
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011022
Eilon Greensteinca003922009-08-12 22:53:28 -070011023 if (!bnx2x_has_rx_work(fp)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011024#ifdef BNX2X_STOP_ON_ERROR
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011025poll_panic:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011026#endif
Ben Hutchings288379f2009-01-19 16:43:59 -080011027 napi_complete(napi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011028
Eilon Greenstein0626b892009-02-12 08:38:14 +000011029 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011030 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
Eilon Greenstein0626b892009-02-12 08:38:14 +000011031 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011032 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
11033 }
Eilon Greenstein356e2382009-02-12 08:38:32 +000011034
Eilon Greenstein8534f322009-03-02 07:59:45 +000011035poll_again:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011036 return work_done;
11037}
11038
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011039
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 *
 * @tx_buf:  software TX bookkeeping entry; marked BNX2X_TSO_SPLIT_BD so
 *           the completion path knows the data BD has no own DMA mapping
 * @tx_bd:   in/out - on entry the start BD, on exit points at the new
 *           data BD (so the caller continues chaining from it)
 * @hlen:    header length in bytes; the start BD is trimmed to this size
 * @bd_prod: current BD producer index
 * @nbd:     total BD count to record in the start BD
 * Returns the advanced BD producer index.
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD: it now carries only the headers */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	/* the data BD reuses the start BD's mapping, offset past the hdrs */
	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
11089
11090static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11091{
11092 if (fix > 0)
11093 csum = (u16) ~csum_fold(csum_sub(csum,
11094 csum_partial(t_header - fix, fix, 0)));
11095
11096 else if (fix < 0)
11097 csum = (u16) ~csum_fold(csum_add(csum,
11098 csum_partial(t_header, -fix, 0)));
11099
11100 return swab16(csum);
11101}
11102
11103static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11104{
11105 u32 rc;
11106
11107 if (skb->ip_summed != CHECKSUM_PARTIAL)
11108 rc = XMIT_PLAIN;
11109
11110 else {
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000011111 if (skb->protocol == htons(ETH_P_IPV6)) {
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011112 rc = XMIT_CSUM_V6;
11113 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11114 rc |= XMIT_CSUM_TCP;
11115
11116 } else {
11117 rc = XMIT_CSUM_V4;
11118 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11119 rc |= XMIT_CSUM_TCP;
11120 }
11121 }
11122
11123 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11124 rc |= XMIT_GSO_V4;
11125
11126 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11127 rc |= XMIT_GSO_V6;
11128
11129 return rc;
11130}
11131
Eilon Greenstein632da4d2009-01-14 06:44:10 +000011132#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
Eilon Greensteinf5372252009-02-12 08:38:30 +000011133/* check if packet requires linearization (packet is too fragmented)
11134 no need to check fragmentation if page size > 8K (there will be no
11135 violation to FW restrictions) */
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011136static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11137 u32 xmit_type)
11138{
11139 int to_copy = 0;
11140 int hlen = 0;
11141 int first_bd_sz = 0;
11142
11143 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11144 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11145
11146 if (xmit_type & XMIT_GSO) {
11147 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11148 /* Check if LSO packet needs to be copied:
11149 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11150 int wnd_size = MAX_FETCH_BD - 3;
Eilon Greenstein33471622008-08-13 15:59:08 -070011151 /* Number of windows to check */
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011152 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11153 int wnd_idx = 0;
11154 int frag_idx = 0;
11155 u32 wnd_sum = 0;
11156
11157 /* Headers length */
11158 hlen = (int)(skb_transport_header(skb) - skb->data) +
11159 tcp_hdrlen(skb);
11160
11161 /* Amount of data (w/o headers) on linear part of SKB*/
11162 first_bd_sz = skb_headlen(skb) - hlen;
11163
11164 wnd_sum = first_bd_sz;
11165
11166 /* Calculate the first sum - it's special */
11167 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11168 wnd_sum +=
11169 skb_shinfo(skb)->frags[frag_idx].size;
11170
11171 /* If there was data on linear skb data - check it */
11172 if (first_bd_sz > 0) {
11173 if (unlikely(wnd_sum < lso_mss)) {
11174 to_copy = 1;
11175 goto exit_lbl;
11176 }
11177
11178 wnd_sum -= first_bd_sz;
11179 }
11180
11181 /* Others are easier: run through the frag list and
11182 check all windows */
11183 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11184 wnd_sum +=
11185 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11186
11187 if (unlikely(wnd_sum < lso_mss)) {
11188 to_copy = 1;
11189 break;
11190 }
11191 wnd_sum -=
11192 skb_shinfo(skb)->frags[wnd_idx].size;
11193 }
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011194 } else {
11195 /* in non-LSO too fragmented packet should always
11196 be linearized */
11197 to_copy = 1;
11198 }
11199 }
11200
11201exit_lbl:
11202 if (unlikely(to_copy))
11203 DP(NETIF_MSG_TX_QUEUED,
11204 "Linearization IS REQUIRED for %s packet. "
11205 "num_frags %d hlen %d first_bd_sz %d\n",
11206 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11207 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11208
11209 return to_copy;
11210}
Eilon Greenstein632da4d2009-01-14 06:44:10 +000011211#endif
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011212
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 *
 * Main transmit entry point (ndo_start_xmit).  Builds the BD chain for
 * @skb on the TX fastpath selected by the skb queue mapping: a start BD,
 * a parsing BD (checksum/TSO info for FW), and one data BD per fragment;
 * then rings the doorbell.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	/* fp is the TX fastpath; fp_stat the paired RX fastpath that
	 * carries the queue statistics */
	struct bnx2x_fastpath *fp, *fp_stat;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	/* TX fastpaths are laid out after the RX fastpaths in bp->fp[] */
	fp = &bp->fp[fp_index + bp->num_rx_queues];
	fp_stat = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp_stat->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		/* the stack should have stopped us before the ring filled */
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		/* no VLAN tag offload: the field carries the packet index */
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		/* header lengths in the parsing BD are in 16-bit words */
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		/* back to bytes for the TSO split check below */
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		/* FW wants headers and data in separate BDs; split if the
		 * linear part holds payload beyond the headers */
		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* one data BD per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp_stat->eth_q_stats.driver_xoff++;
		/* re-check: the TX completion path may have freed room
		 * between the check above and the stop */
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp_stat->tx_pkt++;

	return NETDEV_TX_OK;
}
11491
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011492/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011493static int bnx2x_open(struct net_device *dev)
11494{
11495 struct bnx2x *bp = netdev_priv(dev);
11496
Eilon Greenstein6eccabb2009-01-22 03:37:48 +000011497 netif_carrier_off(dev);
11498
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011499 bnx2x_set_power_state(bp, PCI_D0);
11500
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011501 return bnx2x_nic_load(bp, LOAD_OPEN);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011502}
11503
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011504/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011505static int bnx2x_close(struct net_device *dev)
11506{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011507 struct bnx2x *bp = netdev_priv(dev);
11508
11509 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011510 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11511 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11512 if (!CHIP_REV_IS_SLOW(bp))
11513 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011514
11515 return 0;
11516}
11517
/* called with netif_tx_lock from dev_mcast.c */
/* Program the RX filtering mode (promisc/all-multi/selected multicasts)
 * into the chip.  E1 programs each multicast MAC into the CAM via a
 * SET_MAC ramrod; E1H uses a CRC32c-based hash filter in registers. */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* Nothing can be programmed until the device is fully up */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	/* E1 falls back to all-multi when the list exceeds its CAM space */
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			/* Fill one CAM entry per multicast address; MACs
			 * are stored as three big-endian 16-bit words */
			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			/* Invalidate entries left over from a previous,
			 * longer multicast list */
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			/* CAM region for this port; emulation/FPGA chips
			 * have a smaller per-port multicast area */
			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			/* Mark a SET_MAC ramrod in flight before posting;
			 * the barrier orders the flag vs. the ramrod */
			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			/* Hash each MAC: top CRC byte selects one of 256
			 * bits spread over MC_HASH_SIZE 32-bit registers */
			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
11641
11642/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011643static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11644{
11645 struct sockaddr *addr = p;
11646 struct bnx2x *bp = netdev_priv(dev);
11647
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011648 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011649 return -EINVAL;
11650
11651 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011652 if (netif_running(dev)) {
11653 if (CHIP_IS_E1(bp))
Michael Chane665bfd2009-10-10 13:46:54 +000011654 bnx2x_set_eth_mac_addr_e1(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011655 else
Michael Chane665bfd2009-10-10 13:46:54 +000011656 bnx2x_set_eth_mac_addr_e1h(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011657 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011658
11659 return 0;
11660}
11661
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011662/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011663static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11664 int devad, u16 addr)
11665{
11666 struct bnx2x *bp = netdev_priv(netdev);
11667 u16 value;
11668 int rc;
11669 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11670
11671 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11672 prtad, devad, addr);
11673
11674 if (prtad != bp->mdio.prtad) {
11675 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11676 prtad, bp->mdio.prtad);
11677 return -EINVAL;
11678 }
11679
11680 /* The HW expects different devad if CL22 is used */
11681 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11682
11683 bnx2x_acquire_phy_lock(bp);
11684 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11685 devad, addr, &value);
11686 bnx2x_release_phy_lock(bp);
11687 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11688
11689 if (!rc)
11690 rc = value;
11691 return rc;
11692}
11693
11694/* called with rtnl_lock */
11695static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11696 u16 addr, u16 value)
11697{
11698 struct bnx2x *bp = netdev_priv(netdev);
11699 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11700 int rc;
11701
11702 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11703 " value 0x%x\n", prtad, devad, addr, value);
11704
11705 if (prtad != bp->mdio.prtad) {
11706 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11707 prtad, bp->mdio.prtad);
11708 return -EINVAL;
11709 }
11710
11711 /* The HW expects different devad if CL22 is used */
11712 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11713
11714 bnx2x_acquire_phy_lock(bp);
11715 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11716 devad, addr, value);
11717 bnx2x_release_phy_lock(bp);
11718 return rc;
11719}
11720
11721/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011722static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11723{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011724 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011725 struct mii_ioctl_data *mdio = if_mii(ifr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011726
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011727 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11728 mdio->phy_id, mdio->reg_num, mdio->val_in);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011729
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011730 if (!netif_running(dev))
11731 return -EAGAIN;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011732
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011733 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011734}
11735
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011736/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011737static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11738{
11739 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011740 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011741
11742 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11743 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11744 return -EINVAL;
11745
11746 /* This does not race with packet allocation
Eliezer Tamirc14423f2008-02-28 11:49:42 -080011747 * because the actual alloc size is
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011748 * only updated as part of load
11749 */
11750 dev->mtu = new_mtu;
11751
11752 if (netif_running(dev)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011753 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11754 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011755 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011756
11757 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011758}
11759
/* ndo_tx_timeout: the stack detected a stuck TX queue; schedule the
 * reset task to recover the device. */
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	/* Debug builds freeze the device state for inspection instead */
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
11771
11772#ifdef BCM_VLAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011773/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011774static void bnx2x_vlan_rx_register(struct net_device *dev,
11775 struct vlan_group *vlgrp)
11776{
11777 struct bnx2x *bp = netdev_priv(dev);
11778
11779 bp->vlgrp = vlgrp;
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080011780
11781 /* Set flags according to the required capabilities */
11782 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11783
11784 if (dev->features & NETIF_F_HW_VLAN_TX)
11785 bp->flags |= HW_VLAN_TX_FLAG;
11786
11787 if (dev->features & NETIF_F_HW_VLAN_RX)
11788 bp->flags |= HW_VLAN_RX_FLAG;
11789
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011790 if (netif_running(dev))
Eliezer Tamir49d66772008-02-28 11:53:13 -080011791 bnx2x_set_client_config(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011792}
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011793
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011794#endif
11795
11796#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11797static void poll_bnx2x(struct net_device *dev)
11798{
11799 struct bnx2x *bp = netdev_priv(dev);
11800
11801 disable_irq(bp->pdev->irq);
11802 bnx2x_interrupt(bp->pdev->irq, dev);
11803 enable_irq(bp->pdev->irq);
11804}
11805#endif
11806
/* net_device callback table: all slow-path entry points from the stack */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
11824
/* One-time PCI/netdev bring-up for a function: enable the device, claim
 * and map BAR0 (registers) and BAR2 (doorbells), configure DMA masks,
 * and populate the net_device feature set and MDIO glue.  Uses goto-based
 * cleanup; on failure everything acquired so far is released. */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 (registers) must be a memory BAR */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* BAR2 (doorbells) must be a memory BAR as well */
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* Regions are claimed only by the first function to enable the
	 * device; subsequent functions share them */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* Prefer 64-bit DMA; both streaming and consistent masks must be
	 * 64-bit for DAC, otherwise fall back to 32-bit */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Map at most BNX2X_DB_SIZE of the doorbell BAR, uncached */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	/* Mirror the offload set for VLAN devices stacked on top of us */
	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
11988
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011989static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11990 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -080011991{
11992 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11993
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011994 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11995
11996 /* return value of 1=2.5GHz 2=5GHz */
11997 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -080011998}
11999
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012000static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
12001{
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012002 const struct firmware *firmware = bp->firmware;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012003 struct bnx2x_fw_file_hdr *fw_hdr;
12004 struct bnx2x_fw_file_section *sections;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012005 u32 offset, len, num_ops;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012006 u16 *ops_offsets;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012007 int i;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012008 const u8 *fw_ver;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012009
12010 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
12011 return -EINVAL;
12012
12013 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12014 sections = (struct bnx2x_fw_file_section *)fw_hdr;
12015
12016 /* Make sure none of the offsets and sizes make us read beyond
12017 * the end of the firmware data */
12018 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12019 offset = be32_to_cpu(sections[i].offset);
12020 len = be32_to_cpu(sections[i].len);
12021 if (offset + len > firmware->size) {
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012022 printk(KERN_ERR PFX "Section %d length is out of "
12023 "bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012024 return -EINVAL;
12025 }
12026 }
12027
12028 /* Likewise for the init_ops offsets */
12029 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12030 ops_offsets = (u16 *)(firmware->data + offset);
12031 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12032
12033 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12034 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012035 printk(KERN_ERR PFX "Section offset %d is out of "
12036 "bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012037 return -EINVAL;
12038 }
12039 }
12040
12041 /* Check FW version */
12042 offset = be32_to_cpu(fw_hdr->fw_version.offset);
12043 fw_ver = firmware->data + offset;
12044 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12045 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12046 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12047 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
12048 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
12049 " Should be %d.%d.%d.%d\n",
12050 fw_ver[0], fw_ver[1], fw_ver[2],
12051 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
12052 BCM_5710_FW_MINOR_VERSION,
12053 BCM_5710_FW_REVISION_VERSION,
12054 BCM_5710_FW_ENGINEERING_VERSION);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012055 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012056 }
12057
12058 return 0;
12059}
12060
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012061static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012062{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012063 const __be32 *source = (const __be32 *)_source;
12064 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012065 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012066
12067 for (i = 0; i < n/4; i++)
12068 target[i] = be32_to_cpu(source[i]);
12069}
12070
12071/*
12072 Ops array is stored in the following format:
12073 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12074 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012075static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012076{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012077 const __be32 *source = (const __be32 *)_source;
12078 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012079 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012080
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012081 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012082 tmp = be32_to_cpu(source[j]);
12083 target[i].op = (tmp >> 24) & 0xff;
12084 target[i].offset = tmp & 0xffffff;
12085 target[i].raw_data = be32_to_cpu(source[j+1]);
12086 }
12087}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012088
12089static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012090{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012091 const __be16 *source = (const __be16 *)_source;
12092 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012093 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012094
12095 for (i = 0; i < n/2; i++)
12096 target[i] = be16_to_cpu(source[i]);
12097}
12098
/* Allocate bp->arr sized per the firmware header section 'arr' and fill it
 * from the firmware image, converting endianness with 'func'.  On allocation
 * failure jumps to label 'lbl' (goto-based cleanup in the caller).  Expects
 * 'bp' and 'fw_hdr' to be in scope at the expansion site. */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes " \
					    "for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)
12111
/* Load and unpack the chip firmware: build the per-chip file name, fetch
 * the blob via request_firmware(), validate it, then allocate and
 * endian-convert the init data/ops/offsets arrays and point the STORM
 * microcode pointers into the raw image.  On failure, previously allocated
 * arrays and the firmware blob are released via goto-based cleanup. */
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc, offset;

	/* Create a FW file name: "<prefix><maj>.<min>.<rev>.<eng>.fw" */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	/* Sanity-check section bounds and FW version before using it */
	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware: interrupt tables and program RAM are used
	 * directly from the raw (big-endian) firmware image */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
12187
12188
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012189static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12190 const struct pci_device_id *ent)
12191{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012192 struct net_device *dev = NULL;
12193 struct bnx2x *bp;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012194 int pcie_width, pcie_speed;
Eliezer Tamir25047952008-02-28 11:50:16 -080012195 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012196
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012197 /* dev zeroed in init_etherdev */
Eilon Greenstein555f6c72009-02-12 08:36:11 +000012198 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012199 if (!dev) {
12200 printk(KERN_ERR PFX "Cannot allocate net device\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012201 return -ENOMEM;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012202 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012203
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012204 bp = netdev_priv(dev);
12205 bp->msglevel = debug;
12206
Eilon Greensteindf4770de2009-08-12 08:23:28 +000012207 pci_set_drvdata(pdev, dev);
12208
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012209 rc = bnx2x_init_dev(pdev, dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012210 if (rc < 0) {
12211 free_netdev(dev);
12212 return rc;
12213 }
12214
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012215 rc = bnx2x_init_bp(bp);
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000012216 if (rc)
12217 goto init_one_exit;
12218
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012219 /* Set init arrays */
12220 rc = bnx2x_init_firmware(bp, &pdev->dev);
12221 if (rc) {
12222 printk(KERN_ERR PFX "Error loading firmware\n");
12223 goto init_one_exit;
12224 }
12225
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000012226 rc = register_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012227 if (rc) {
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000012228 dev_err(&pdev->dev, "Cannot register net device\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012229 goto init_one_exit;
12230 }
12231
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012232 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
Eliezer Tamir25047952008-02-28 11:50:16 -080012233 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
Eilon Greenstein87942b42009-02-12 08:36:49 +000012234 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012235 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012236 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
Eliezer Tamir25047952008-02-28 11:50:16 -080012237 dev->base_addr, bp->pdev->irq);
Johannes Berge1749612008-10-27 15:59:26 -070012238 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
Eilon Greensteinc0162012009-03-02 08:01:05 +000012239
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012240 return 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012241
12242init_one_exit:
12243 if (bp->regview)
12244 iounmap(bp->regview);
12245
12246 if (bp->doorbells)
12247 iounmap(bp->doorbells);
12248
12249 free_netdev(dev);
12250
12251 if (atomic_read(&pdev->enable_cnt) == 1)
12252 pci_release_regions(pdev);
12253
12254 pci_disable_device(pdev);
12255 pci_set_drvdata(pdev, NULL);
12256
12257 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012258}
12259
/* PCI remove callback: detach the netdev and release every resource
 * acquired by bnx2x_init_one(), in reverse order of acquisition. */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	/* Guard against a probe failure having left no drvdata behind */
	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	/* Stop all netdev activity before freeing anything it may use */
	unregister_netdev(dev);

	/* Free firmware-derived init buffers and the firmware image */
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* Release regions only when this is the last enable of the device */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
12292
/* PM suspend hook: save PCI state, detach the interface, unload the
 * NIC and drop to the power state chosen by the PCI core. */
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	/* Guard against a probe failure having left no drvdata behind */
	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	/* Interface is down - saving config space is all that is needed */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	/* NOTE(review): UNLOAD_CLOSE presumably mirrors a regular ifdown
	 * (vs. UNLOAD_NORMAL) - confirm against bnx2x_nic_unload() */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
12323
/* PM resume hook: restore PCI state, bring the device back to D0 and
 * reload the NIC if the interface was running at suspend time. */
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	/* Guard against a probe failure having left no drvdata behind */
	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	/* Interface was down at suspend - nothing to reload */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
12354
/* Minimal NIC unload used after a fatal PCI error: the adapter may be
 * unreachable, so only host-side state is torn down (no HW handshakes
 * beyond stopping our own processing). Always returns 0. */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	/* Stop the periodic timer and statistics processing */
	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* On E1 chips, invalidate the multicast CAM entries held in the
	 * slow-path buffer */
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
12394
/* Re-discover the shared-memory (MCP management firmware) state after
 * a slot reset so a subsequent NIC load can talk to the firmware. */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* A shmem base of 0 or outside [0xA0000, 0xC0000) means the MCP
	 * is not active; fall back to no-MCP operation */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	/* Sanity-check the shmem validity signature; log but continue */
	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	/* Resynchronize the driver/firmware mailbox sequence number */
	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
12424
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 *
 * Returns %PCI_ERS_RESULT_DISCONNECT on permanent failure, otherwise
 * %PCI_ERS_RESULT_NEED_RESET to request a slot reset.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	/* Link is dead for good - no point requesting a reset */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Tear down host-side NIC state; the device may be unreachable */
	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
12458
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 *
 * Returns %PCI_ERS_RESULT_DISCONNECT if the device cannot be
 * re-enabled, otherwise %PCI_ERS_RESULT_RECOVERED.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Restore bus mastering and the saved PCI config space */
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
12489
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	/* Re-establish contact with the management firmware first */
	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
12513
/* PCI error-recovery (EEH/AER) callbacks for this driver */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};
12519
/* Main PCI driver descriptor: binds probe/remove, legacy power
 * management and error-recovery callbacks to the bnx2x ID table. */
static struct pci_driver bnx2x_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2x_pci_tbl,
	.probe = bnx2x_init_one,
	.remove = __devexit_p(bnx2x_remove_one),
	.suspend = bnx2x_suspend,
	.resume = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
12529
12530static int __init bnx2x_init(void)
12531{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000012532 int ret;
12533
Eilon Greenstein938cf542009-08-12 08:23:37 +000012534 printk(KERN_INFO "%s", version);
12535
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080012536 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12537 if (bnx2x_wq == NULL) {
12538 printk(KERN_ERR PFX "Cannot create workqueue\n");
12539 return -ENOMEM;
12540 }
12541
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000012542 ret = pci_register_driver(&bnx2x_pci_driver);
12543 if (ret) {
12544 printk(KERN_ERR PFX "Cannot register driver\n");
12545 destroy_workqueue(bnx2x_wq);
12546 }
12547 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012548}
12549
/* Module exit point: unregister from the PCI core (which removes all
 * bound devices) and only then destroy the slow-path workqueue. */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
12556
/* Hook module load/unload to the init/cleanup routines above */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
12559
Michael Chan993ac7b2009-10-10 13:46:56 +000012560#ifdef BCM_CNIC
12561
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	/* Return credits for the completions that were observed */
	bp->cnic_spq_pending -= count;

	/* Drain queued CNIC kwqes onto the slow-path queue while the
	 * in-flight budget (max_kwqe_pending) allows it */
	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
			bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		/* Copy the next queued kwqe into a slow-path queue slot */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* Advance the consumer, wrapping at the end of the ring */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	/* Publish the new producer value */
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
12597
12598static int bnx2x_cnic_sp_queue(struct net_device *dev,
12599 struct kwqe_16 *kwqes[], u32 count)
12600{
12601 struct bnx2x *bp = netdev_priv(dev);
12602 int i;
12603
12604#ifdef BNX2X_STOP_ON_ERROR
12605 if (unlikely(bp->panic))
12606 return -EIO;
12607#endif
12608
12609 spin_lock_bh(&bp->spq_lock);
12610
12611 for (i = 0; i < count; i++) {
12612 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12613
12614 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12615 break;
12616
12617 *bp->cnic_kwq_prod = *spe;
12618
12619 bp->cnic_kwq_pending++;
12620
12621 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12622 spe->hdr.conn_and_cmd_data, spe->hdr.type,
12623 spe->data.mac_config_addr.hi,
12624 spe->data.mac_config_addr.lo,
12625 bp->cnic_kwq_pending);
12626
12627 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12628 bp->cnic_kwq_prod = bp->cnic_kwq;
12629 else
12630 bp->cnic_kwq_prod++;
12631 }
12632
12633 spin_unlock_bh(&bp->spq_lock);
12634
12635 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12636 bnx2x_cnic_sp_post(bp, 0);
12637
12638 return i;
12639}
12640
12641static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12642{
12643 struct cnic_ops *c_ops;
12644 int rc = 0;
12645
12646 mutex_lock(&bp->cnic_mutex);
12647 c_ops = bp->cnic_ops;
12648 if (c_ops)
12649 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12650 mutex_unlock(&bp->cnic_mutex);
12651
12652 return rc;
12653}
12654
12655static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12656{
12657 struct cnic_ops *c_ops;
12658 int rc = 0;
12659
12660 rcu_read_lock();
12661 c_ops = rcu_dereference(bp->cnic_ops);
12662 if (c_ops)
12663 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12664 rcu_read_unlock();
12665
12666 return rc;
12667}
12668
12669/*
12670 * for commands that have no data
12671 */
12672static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12673{
12674 struct cnic_ctl_info ctl = {0};
12675
12676 ctl.cmd = cmd;
12677
12678 return bnx2x_cnic_ctl_send(bp, &ctl);
12679}
12680
12681static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12682{
12683 struct cnic_ctl_info ctl;
12684
12685 /* first we tell CNIC and only then we count this as a completion */
12686 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12687 ctl.data.comp.cid = cid;
12688
12689 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12690 bnx2x_cnic_sp_post(bp, 1);
12691}
12692
12693static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12694{
12695 struct bnx2x *bp = netdev_priv(dev);
12696 int rc = 0;
12697
12698 switch (ctl->cmd) {
12699 case DRV_CTL_CTXTBL_WR_CMD: {
12700 u32 index = ctl->data.io.offset;
12701 dma_addr_t addr = ctl->data.io.dma_addr;
12702
12703 bnx2x_ilt_wr(bp, index, addr);
12704 break;
12705 }
12706
12707 case DRV_CTL_COMPLETION_CMD: {
12708 int count = ctl->data.comp.comp_count;
12709
12710 bnx2x_cnic_sp_post(bp, count);
12711 break;
12712 }
12713
12714 /* rtnl_lock is held. */
12715 case DRV_CTL_START_L2_CMD: {
12716 u32 cli = ctl->data.ring.client_id;
12717
12718 bp->rx_mode_cl_mask |= (1 << cli);
12719 bnx2x_set_storm_rx_mode(bp);
12720 break;
12721 }
12722
12723 /* rtnl_lock is held. */
12724 case DRV_CTL_STOP_L2_CMD: {
12725 u32 cli = ctl->data.ring.client_id;
12726
12727 bp->rx_mode_cl_mask &= ~(1 << cli);
12728 bnx2x_set_storm_rx_mode(bp);
12729 break;
12730 }
12731
12732 default:
12733 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12734 rc = -EINVAL;
12735 }
12736
12737 return rc;
12738}
12739
/* Fill in the IRQ/status-block information that CNIC uses to hook its
 * interrupt handling onto this device. */
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		/* CNIC is handed MSI-X table entry 1 */
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	/* Entry 0: the dedicated CNIC status block */
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	/* Entry 1: the driver's default status block */
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
12759
/* CNIC registration callback (exposed via bnx2x_cnic_probe): allocate
 * the software kwq ring, initialize the CNIC status block and IRQ
 * info, and finally publish @ops so completions may call into CNIC.
 * Returns 0, -EINVAL (no ops), -EBUSY or -ENOMEM. */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* NOTE(review): intr_sem != 0 appears to mean interrupts are
	 * currently gated - registration is refused then; confirm */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	/* One zeroed page backs the software kwq ring */
	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* Ring starts empty: producer == consumer at the base */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* Publish the ops pointer last, once everything is set up */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
12797
/* CNIC unregistration callback: unpublish the ops pointer, wait out
 * all RCU readers, then free the software kwq ring. Returns 0. */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	/* Drop the iSCSI MAC if it was set on behalf of CNIC */
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* Wait for in-flight rcu_read_lock() users of cnic_ops before
	 * freeing the ring they might still reference */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
12817
12818struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12819{
12820 struct bnx2x *bp = netdev_priv(dev);
12821 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12822
12823 cp->drv_owner = THIS_MODULE;
12824 cp->chip_id = CHIP_ID(bp);
12825 cp->pdev = bp->pdev;
12826 cp->io_base = bp->regview;
12827 cp->io_base2 = bp->doorbells;
12828 cp->max_kwqe_pending = 8;
12829 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12830 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12831 cp->ctx_tbl_len = CNIC_ILT_LINES;
12832 cp->starting_cid = BCM_CNIC_CID_START;
12833 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12834 cp->drv_ctl = bnx2x_drv_ctl;
12835 cp->drv_register_cnic = bnx2x_register_cnic;
12836 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12837
12838 return cp;
12839}
12840EXPORT_SYMBOL(bnx2x_cnic_probe);
12841
12842#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012843