blob: bdecd42d2b29a951b1d8faef1532a98f32aaf8a9 [file] [log] [blame]
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080041#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070045#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020046#include <linux/workqueue.h>
47#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070048#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020049#include <linux/prefetch.h>
50#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020051#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000052#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020053
Eilon Greenstein359d8b12009-02-12 08:38:25 +000054
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020055#include "bnx2x.h"
56#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070057#include "bnx2x_init_ops.h"
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000058#include "bnx2x_dump.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020059
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000060#define DRV_MODULE_VERSION "1.52.1-5"
Eilon Greenstein0ab365f2009-11-09 06:09:37 +000061#define DRV_MODULE_RELDATE "2009/11/09"
Eilon Greenstein34f80b02008-06-23 20:33:01 -070062#define BNX2X_BC_VER 0x040200
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020063
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070064#include <linux/firmware.h>
65#include "bnx2x_fw_file_hdr.h"
66/* FW files */
Ben Hutchings45229b42009-11-07 11:53:39 +000067#define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070074
Eilon Greenstein34f80b02008-06-23 20:33:01 -070075/* Time in jiffies before concluding the transmitter is hung */
76#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020077
/* Banner printed once at module load (see version string macros above). */
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

/* Module parameters - all read-only after load (perm bits 0). */

/* Multi-queue RSS mode; enabled by default. */
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

/* Number of RSS queues; 0 means derive from CPU count (see desc). */
static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is as a number of CPUs)");

/* Non-zero disables TPA (HW LRO aggregation). */
static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

/* Force interrupt mode instead of auto-probing MSI-X. */
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

/* Generate pause frames rather than drop on exhausted host rings. */
static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

/* Debug-only: poll for work instead of relying on interrupts. */
static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

/* Debug-only: force PCIe Max Read Request Size; -1 leaves HW default. */
static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

/* Initial netif debug message level mask. */
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

/* Single driver-wide workqueue for slowpath tasks. */
static struct workqueue_struct *bnx2x_wq;

/* Board index values; used as the driver_data in bnx2x_pci_tbl and as
 * an index into board_info[] below.
 */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
141
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700142
/* PCI IDs this driver binds to; driver_data carries the board_type enum. */
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
151
152/****************************************************************************
153* General service functions
154****************************************************************************/
155
/* Indirect GRC register write through PCI config space.
 *
 * used only at init
 * locking is done by mcp
 *
 * Writes @val to GRC address @addr by programming the GRC address/data
 * window in config space, then restores the window to the vendor-ID
 * offset so a later plain config read does not hit GRC by accident.
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
166
/* Indirect GRC register read through PCI config space.
 *
 * Counterpart of bnx2x_reg_wr_ind(): points the GRC window at @addr,
 * reads the data register, then parks the window back at the vendor-ID
 * offset. Same init-time/MCP-locking caveats as the write variant.
 */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200178
/* "GO" doorbell registers for the 16 DMAE command channels, indexed by
 * channel; writing 1 kicks off the command loaded in that channel
 * (see bnx2x_post_dmae()).
 */
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
185
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	/* Command memory slot for channel @idx; copied one dword at a time. */
	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	/* Ring the channel's doorbell - must follow the full command copy. */
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
202
/* DMA @len32 dwords from host memory at @dma_addr to GRC address
 * @dst_addr using the DMAE engine.
 *
 * Falls back to indirect register writes (bnx2x_init_ind_wr) while the
 * engine is not yet initialized (!bp->dmae_ready); in that case the data
 * is taken from the slowpath wb_data scratch area, which the caller is
 * expected to have filled.
 *
 * Completion is detected by polling the slowpath wb_comp word for
 * DMAE_COMP_VAL, up to ~200 iterations; on timeout an error is logged
 * and the function gives up (no return status). Serialized against
 * other DMAE users by bp->dmae_mutex.
 */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* PCI -> GRC transfer, completion written back to host (PCI),
	 * endianness swap chosen at compile time.
	 */
	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;	/* GRC address in dwords */
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	/* Clear the completion word before posting the command. */
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
276
/* DMA @len32 dwords from GRC address @src_addr into the slowpath
 * wb_data scratch area using the DMAE engine.
 *
 * Mirror image of bnx2x_write_dmae(): while the engine is not ready the
 * dwords are fetched one at a time via indirect config-space reads.
 * The result is left in bp->slowpath->wb_data[] for the caller.
 * Completion polling, timeout handling and dmae_mutex serialization are
 * identical to the write path.
 */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* GRC -> PCI transfer, completion written back to host (PCI). */
	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;	/* GRC address in dwords */
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	/* Scrub destination and completion word before posting. */
	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200351
Eilon Greenstein573f2032009-08-12 08:24:14 +0000352void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
353 u32 addr, u32 len)
354{
355 int offset = 0;
356
357 while (len > DMAE_LEN32_WR_MAX) {
358 bnx2x_write_dmae(bp, phys_addr + offset,
359 addr + offset, DMAE_LEN32_WR_MAX);
360 offset += DMAE_LEN32_WR_MAX * 4;
361 len -= DMAE_LEN32_WR_MAX;
362 }
363
364 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
365}
366
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700367/* used only for slowpath so not inlined */
368static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
369{
370 u32 wb_write[2];
371
372 wb_write[0] = val_hi;
373 wb_write[1] = val_lo;
374 REG_WR_DMAE(bp, reg, wb_write, 2);
375}
376
#ifdef USE_WB_RD
/* Read a wide-bus register pair via DMAE and fold it into a u64.
 * Compiled only when USE_WB_RD is defined.
 */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 data[2];

	REG_RD_DMAE(bp, reg, data, 2);
	return HILO_U64(data[0], data[1]);
}
#endif
387
/* Scan the assert lists of the four STORM processors (X, T, C, U) and
 * log every recorded firmware assert.
 *
 * For each storm: read the list index, then walk the assert array
 * reading four dwords (rows) per entry; an entry whose first row equals
 * COMMON_ASM_INVALID_ASSERT_OPCODE terminates that storm's scan.
 *
 * Returns the total number of valid assert entries found.
 *
 * NOTE(review): the four per-storm loops are structurally identical but
 * cannot be folded into one helper, because the *_ASSERT_LIST_OFFSET(i)
 * accessors are function-like macros and cannot be passed as arguments.
 */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800508
/* Dump the bootcode (MCP) trace buffer from its scratchpad to the log.
 *
 * The current write mark is read from scratchpad offset 0xf104 and
 * rounded up to a dword boundary; the buffer is then printed in two
 * passes (mark..end, then start..mark) so output comes out in
 * chronological order. Each pass reads 8 dwords at a time, byte-swaps
 * them with htonl() so the ASCII trace text is in memory order, and
 * NUL-terminates via data[8] before printing as a string.
 */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);	/* round up to dword */
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;	/* string terminator */
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;	/* string terminator */
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}
536
/* Crash-time diagnostic dump: log driver indices, per-queue Rx/Tx state
 * and a window of each ring around the current consumer positions, then
 * append the firmware trace and assert lists.
 *
 * Statistics collection is disabled first so the state being printed is
 * not mutated concurrently by the stats machinery.
 */
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx: BDs around the consumer (10 back, 503 forward), SGE range
	 * between producer and last_max_sge, and CQEs around the
	 * completion consumer.
	 */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx: sw packet ring and hw BD ring around the consumer. */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
648
/* Enable host-coalescing interrupts for this port, according to the
 * active interrupt mode (MSI-X / MSI / INTx as reflected in bp->flags).
 *
 * In the INTx case the config register is written twice: first with the
 * MSI/MSI-X enable bit set, then again with it cleared (the final value
 * written by the common path below).
 * On E1H the leading/trailing edge registers are also programmed; in MF
 * mode only this function's E1HVN bit (plus, for the PMF, NIG and GPIO3
 * attention bits) is enabled.
 */
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
708
/*
 * bnx2x_int_disable - mask all interrupt sources (single-ISR, MSI/MSI-X,
 * INTx line and attention bits) in this port's HC config register.
 *
 * A read-back verifies the write actually landed in the IGU; failure here
 * indicates a serious chip-access problem and is only logged.
 */
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* clear every interrupt-enable bit at once */
	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
730
/*
 * bnx2x_int_disable_sync - stop interrupt processing and wait for every
 * in-flight handler to finish.
 *
 * @disable_hw: when set, also mask interrupts at the chip via
 *              bnx2x_int_disable(); otherwise only the software gate
 *              (intr_sem) is raised.
 *
 * After this returns no ISR is running and the slowpath work item has been
 * cancelled/flushed, so the caller may safely tear down resources.
 */
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is the default/slowpath SB */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		/* one extra vector is reserved for CNIC */
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
760
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700761/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200762
763/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700764 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200765 */
766
/*
 * bnx2x_ack_sb - acknowledge a status block to the IGU.
 *
 * Builds an igu_ack_register word from the SB id, storm id, consumed index,
 * interrupt mode @op and @update flag, and writes it to this port's
 * INT_ACK command register in one 32-bit access.
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
789
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000790static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200791{
792 struct host_status_block *fpsb = fp->status_blk;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200793
794 barrier(); /* status block is written to by the chip */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000795 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
796 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200797}
798
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200799static u16 bnx2x_ack_int(struct bnx2x *bp)
800{
Eilon Greenstein5c862842008-08-13 15:51:48 -0700801 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
802 COMMAND_REG_SIMD_MASK);
803 u32 result = REG_RD(bp, hc_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200804
Eilon Greenstein5c862842008-08-13 15:51:48 -0700805 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
806 result, hc_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200807
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200808 return result;
809}
810
811
812/*
813 * fast path service functions
814 */
815
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -0800816static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
817{
818 /* Tell compiler that consumer and producer can change */
819 barrier();
820 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
Eilon Greenstein237907c2009-01-14 06:42:44 +0000821}
822
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 *
 * Walks the chain of buffer descriptors belonging to one transmitted
 * packet: unmaps the start BD, skips the parse BD (and the TSO split
 * header BD, which carries no mapping), unmaps every fragment BD, and
 * finally frees the skb and clears the software ring entry.
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	/* nbd counts all BDs of the packet; drop one for the start BD */
	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	/* clear the software ring entry for reuse */
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
889
/*
 * bnx2x_tx_avail - number of Tx BDs still free on this fastpath ring.
 *
 * prod/cons are free-running u16 indices; SUB_S16 gives their signed
 * distance so the computation survives wrap-around.  The NUM_TX_RINGS
 * addend accounts for the "next-page" BDs that can never hold data.
 */
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	/* sanity: used must stay within the configured ring size */
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
912
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000913static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
914{
915 u16 hw_cons;
916
917 /* Tell compiler that status block fields can change */
918 barrier();
919 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
920 return hw_cons != fp->tx_pkt_cons;
921}
922
/*
 * bnx2x_tx_int - reclaim completed Tx packets on one fastpath ring.
 *
 * Frees every packet between the software consumer and the HW consumer
 * reported in the status block, updates the ring consumers, and wakes
 * the netdev Tx queue if it was stopped and enough BDs became free.
 *
 * Returns 0, or -1 if the driver has panicked (BNX2X_STOP_ON_ERROR).
 */
static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		/* re-check under the barrier before waking the queue */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
	return 0;
}
978
Michael Chan993ac7b2009-10-10 13:46:56 +0000979#ifdef BCM_CNIC
980static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
981#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -0700982
/*
 * bnx2x_sp_event - handle a slowpath (ramrod) completion arriving on the
 * Rx CQ and advance the matching driver state machine.
 *
 * Non-leading queues (fp->index != 0) only track their own open/halt
 * state; the leading queue additionally drives the global bp->state
 * transitions (port setup/halt/delete, MAC set, CNIC CFC delete).
 * The mb() calls make the state change visible to bnx2x_wait_ramrod()
 * polling on another CPU.
 */
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* a ramrod completed - a slowpath queue slot is free again */
	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	/* leading queue: combined command/state drives the global machine */
	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
1065
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001066static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1067 struct bnx2x_fastpath *fp, u16 index)
1068{
1069 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1070 struct page *page = sw_buf->page;
1071 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1072
1073 /* Skip "next page" elements */
1074 if (!page)
1075 return;
1076
1077 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001078 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001079 __free_pages(page, PAGES_PER_SGE_SHIFT);
1080
1081 sw_buf->page = NULL;
1082 sge->addr_hi = 0;
1083 sge->addr_lo = 0;
1084}
1085
/*
 * bnx2x_free_rx_sge_range - release every Rx SGE page in [0, last).
 */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int idx;

	for (idx = 0; idx < last; idx++)
		bnx2x_free_rx_sge(bp, fp, idx);
}
1094
1095static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1096 struct bnx2x_fastpath *fp, u16 index)
1097{
1098 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1099 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1100 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1101 dma_addr_t mapping;
1102
1103 if (unlikely(page == NULL))
1104 return -ENOMEM;
1105
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001106 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001107 PCI_DMA_FROMDEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001108 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001109 __free_pages(page, PAGES_PER_SGE_SHIFT);
1110 return -ENOMEM;
1111 }
1112
1113 sw_buf->page = page;
1114 pci_unmap_addr_set(sw_buf, mapping, mapping);
1115
1116 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1117 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1118
1119 return 0;
1120}
1121
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001122static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1123 struct bnx2x_fastpath *fp, u16 index)
1124{
1125 struct sk_buff *skb;
1126 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1127 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1128 dma_addr_t mapping;
1129
1130 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1131 if (unlikely(skb == NULL))
1132 return -ENOMEM;
1133
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07001134 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001135 PCI_DMA_FROMDEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001136 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001137 dev_kfree_skb(skb);
1138 return -ENOMEM;
1139 }
1140
1141 rx_buf->skb = skb;
1142 pci_unmap_addr_set(rx_buf, mapping, mapping);
1143
1144 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1145 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1146
1147 return 0;
1148}
1149
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	/* give the beginning of the buffer back to the device; only the
	   first RX_COPY_THRESH bytes were touched by the CPU */
	pci_dma_sync_single_for_device(bp->pdev,
			       pci_unmap_addr(cons_rx_buf, mapping),
			       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	/* move skb, mapping and HW descriptor from cons to prod */
	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
1173
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001174static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1175 u16 idx)
1176{
1177 u16 last_max = fp->last_max_sge;
1178
1179 if (SUB_S16(idx, last_max) > 0)
1180 fp->last_max_sge = idx;
1181}
1182
1183static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1184{
1185 int i, j;
1186
1187 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1188 int idx = RX_SGE_CNT * i - 1;
1189
1190 for (j = 0; j < 2; j++) {
1191 SGE_MASK_CLEAR_BIT(fp, idx);
1192 idx--;
1193 }
1194 }
1195}
1196
/*
 * bnx2x_update_sge_prod - consume the SGEs listed in a fastpath CQE and
 * advance the SGE producer over every fully-consumed mask element.
 *
 * The sge_mask is a bitmap of outstanding SGE entries; the producer may
 * only move across elements whose mask went to zero (all pages in that
 * element consumed), which it then re-arms to all-ones.
 */
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	/* number of SGE pages used by this packet (beyond len_on_bd) */
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		/* stop at the first element with outstanding pages */
		if (likely(fp->sge_mask[i]))
			break;

		/* element fully consumed - re-arm it and advance producer */
		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
1249
1250static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1251{
1252 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1253 memset(fp->sge_mask, 0xff,
1254 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1255
Eilon Greenstein33471622008-08-13 15:59:08 -07001256 /* Clear the two last indices in the page to 1:
1257 these are the indices that correspond to the "next" element,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001258 hence will never be indicated and should be removed from
1259 the calculations. */
1260 bnx2x_clear_sge_mask_next_elems(fp);
1261}
1262
/*
 * bnx2x_tpa_start - begin TPA (LRO) aggregation on @queue.
 *
 * Swaps buffers: the spare skb held in the TPA pool is mapped and placed
 * at the producer BD, while the partially-filled skb at the consumer is
 * parked in the pool (still mapped) until bnx2x_tpa_stop() completes the
 * aggregation.
 *
 * NOTE(review): the @skb parameter is not referenced in this body -
 * presumably kept for symmetry with the caller; confirm before relying
 * on it.
 */
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	/* debug bookkeeping of which TPA bins are in flight */
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
1301
/*
 * bnx2x_fill_frag_skb - attach the SGE pages of an aggregated (TPA)
 * packet to @skb as page fragments.
 *
 * Walks the SGL in the fastpath CQE, replaces each consumed page in the
 * SGE ring with a freshly allocated one, and hands the old page to the
 * skb.  Returns 0 on success or a negative errno if a replacement page
 * could not be allocated (the packet is then dropped by the caller).
 */
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	/* bytes carried by the SGEs = total packet minus the first BD */
	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					   max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		/* keep a copy: bnx2x_alloc_rx_sge() overwrites the entry */
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
1367
/*
 * bnx2x_tpa_stop - complete a TPA (LRO) aggregation on @queue and pass
 * the assembled packet to the stack.
 *
 * The pooled skb (holding the packet head) is always unmapped.  If a
 * replacement skb can be allocated, the head is fixed up (pad, length,
 * protocol, IP checksum - taking an un-accelerated VLAN tag into
 * account), the SGE fragments are attached via bnx2x_fill_frag_skb(),
 * and the packet is delivered (with VLAN acceleration when enabled).
 * On any allocation failure the packet is dropped and counted, and the
 * bin is returned to the STOP state either way.
 */
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		/* VLAN tag present but HW acceleration disabled: the tag
		   is still inline in the packet data */
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			/* recompute the IP checksum after aggregation */
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
1457
1458static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1459 struct bnx2x_fastpath *fp,
1460 u16 bd_prod, u16 rx_comp_prod,
1461 u16 rx_sge_prod)
1462{
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08001463 struct ustorm_eth_rx_producers rx_prods = {0};
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001464 int i;
1465
1466 /* Update producers */
1467 rx_prods.bd_prod = bd_prod;
1468 rx_prods.cqe_prod = rx_comp_prod;
1469 rx_prods.sge_prod = rx_sge_prod;
1470
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -08001471 /*
1472 * Make sure that the BD and SGE data is updated before updating the
1473 * producers since FW might read the BD/SGE right after the producer
1474 * is updated.
1475 * This is only applicable for weak-ordered memory model archs such
1476 * as IA-64. The following barrier is also mandatory since FW will
1477 * assumes BDs must have buffers.
1478 */
1479 wmb();
1480
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08001481 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1482 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greenstein0626b892009-02-12 08:38:14 +00001483 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001484 ((u32 *)&rx_prods)[i]);
1485
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -08001486 mmiowb(); /* keep prod updates ordered */
1487
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001488 DP(NETIF_MSG_RX_STATUS,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00001489 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1490 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001491}
1492
/* Rx fast-path poll routine: drain up to @budget completions from the
 * Rx completion queue (RCQ) of @fp, hand completed packets to the
 * stack, recycle/replenish Rx buffers and finally push the updated
 * producers to the FW.  Runs in NAPI context.
 * Returns the number of Rx packets processed (at most @budget).
 */
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	/* Take local snapshots of the ring indices; bd_prod_fw tracks the
	 * producer value that will be reported to the FW at the end */
	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	/* Process one completion queue entry per iteration */
	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					/* The skb is parked in the TPA pool;
					 * the BD is consumed but the packet is
					 * delivered only on TPA_END */
					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			/* Non-TPA path: sync only the header area the CPU
			 * will touch before deciding how to pass it up */
			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				/* Original buffer stays on the ring */
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				/* Replacement posted at bd_prod; the old skb
				 * can now be unmapped and handed upward */
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				/* Recycle the buffer back onto the ring and
				 * drop the packet */
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Report HW checksum result only when Rx csum
			 * offload is enabled and the CQE says it is OK */
			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	/* Write the updated indices back and tell the FW about them */
	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
1726
1727static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1728{
1729 struct bnx2x_fastpath *fp = fp_cookie;
1730 struct bnx2x *bp = fp->bp;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001731
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07001732 /* Return here if interrupt is disabled */
1733 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1734 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1735 return IRQ_HANDLED;
1736 }
1737
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001738 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
Eilon Greensteinca003922009-08-12 22:53:28 -07001739 fp->index, fp->sb_id);
Eilon Greenstein0626b892009-02-12 08:38:14 +00001740 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001741
1742#ifdef BNX2X_STOP_ON_ERROR
1743 if (unlikely(bp->panic))
1744 return IRQ_HANDLED;
1745#endif
1746
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001747 /* Handle Rx and Tx according to MSI-X vector */
1748 prefetch(fp->rx_cons_sb);
1749 prefetch(fp->tx_cons_sb);
1750 prefetch(&fp->status_blk->u_status_block.status_block_index);
1751 prefetch(&fp->status_blk->c_status_block.status_block_index);
1752 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001753
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001754 return IRQ_HANDLED;
1755}
1756
/* Legacy INTx / MSI interrupt handler.  The value acked from the IGU is
 * a bit-mask: bit 0 signals slowpath attention, bit (1 + sb_id) selects
 * a fastpath status block (mask = 0x2 << sb_id below).  Each set
 * fastpath bit schedules NAPI for the matching queue; slowpath work is
 * deferred to the sp_task workqueue.
 */
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Dispatch each fastpath queue whose status bit is set */
	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	/* Forward CNIC (iSCSI/FCoE offload) events to the registered
	 * handler; cnic_ops is RCU-protected against unregistration */
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* Bit 0 = slowpath attention: defer to the slowpath workqueue */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	/* Any bits still set do not map to a known source */
	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
1828
1829/* end of fast path */
1830
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001831static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001832
1833/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001834
1835/*
1836 * General service functions
1837 */
1838
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001839static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001840{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001841 u32 lock_status;
1842 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001843 int func = BP_FUNC(bp);
1844 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001845 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001846
1847 /* Validating that the resource is within range */
1848 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1849 DP(NETIF_MSG_HW,
1850 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1851 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1852 return -EINVAL;
1853 }
1854
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001855 if (func <= 5) {
1856 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1857 } else {
1858 hw_lock_control_reg =
1859 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1860 }
1861
Eliezer Tamirf1410642008-02-28 11:51:50 -08001862 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001863 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001864 if (lock_status & resource_bit) {
1865 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1866 lock_status, resource_bit);
1867 return -EEXIST;
1868 }
1869
Eilon Greenstein46230476b2008-08-25 15:23:30 -07001870 /* Try for 5 second every 5ms */
1871 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001872 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001873 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1874 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001875 if (lock_status & resource_bit)
1876 return 0;
1877
1878 msleep(5);
1879 }
1880 DP(NETIF_MSG_HW, "Timeout\n");
1881 return -EAGAIN;
1882}
1883
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001884static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001885{
1886 u32 lock_status;
1887 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001888 int func = BP_FUNC(bp);
1889 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001890
1891 /* Validating that the resource is within range */
1892 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1893 DP(NETIF_MSG_HW,
1894 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1895 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1896 return -EINVAL;
1897 }
1898
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001899 if (func <= 5) {
1900 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1901 } else {
1902 hw_lock_control_reg =
1903 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1904 }
1905
Eliezer Tamirf1410642008-02-28 11:51:50 -08001906 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001907 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001908 if (!(lock_status & resource_bit)) {
1909 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1910 lock_status, resource_bit);
1911 return -EFAULT;
1912 }
1913
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001914 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001915 return 0;
1916}
1917
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001918/* HW Lock for shared dual port PHYs */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001919static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001920{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001921 mutex_lock(&bp->port.phy_mutex);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001922
Eilon Greenstein46c6a672009-02-12 08:36:58 +00001923 if (bp->port.need_hw_lock)
1924 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001925}
1926
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001927static void bnx2x_release_phy_lock(struct bnx2x *bp)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001928{
Eilon Greenstein46c6a672009-02-12 08:36:58 +00001929 if (bp->port.need_hw_lock)
1930 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001931
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001932 mutex_unlock(&bp->port.phy_mutex);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001933}
1934
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001935int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1936{
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1943 u32 gpio_reg;
1944 int value;
1945
1946 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1947 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1948 return -EINVAL;
1949 }
1950
1951 /* read GPIO value */
1952 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1953
1954 /* get the requested pin value */
1955 if ((gpio_reg & gpio_mask) == gpio_mask)
1956 value = 1;
1957 else
1958 value = 0;
1959
1960 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1961
1962 return value;
1963}
1964
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001965int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001966{
1967 /* The GPIO should be swapped if swap register is set and active */
1968 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001969 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001970 int gpio_shift = gpio_num +
1971 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1972 u32 gpio_mask = (1 << gpio_shift);
1973 u32 gpio_reg;
1974
1975 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1976 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1977 return -EINVAL;
1978 }
1979
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001980 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001981 /* read GPIO and mask except the float bits */
1982 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1983
1984 switch (mode) {
1985 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1986 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1987 gpio_num, gpio_shift);
1988 /* clear FLOAT and set CLR */
1989 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1990 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1991 break;
1992
1993 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1994 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1995 gpio_num, gpio_shift);
1996 /* clear FLOAT and set SET */
1997 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1998 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1999 break;
2000
Eilon Greenstein17de50b2008-08-13 15:56:59 -07002001 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002002 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2003 gpio_num, gpio_shift);
2004 /* set FLOAT */
2005 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2006 break;
2007
2008 default:
2009 break;
2010 }
2011
2012 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002013 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002014
2015 return 0;
2016}
2017
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00002018int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2019{
2020 /* The GPIO should be swapped if swap register is set and active */
2021 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2022 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2023 int gpio_shift = gpio_num +
2024 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2025 u32 gpio_mask = (1 << gpio_shift);
2026 u32 gpio_reg;
2027
2028 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2029 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2030 return -EINVAL;
2031 }
2032
2033 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2034 /* read GPIO int */
2035 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2036
2037 switch (mode) {
2038 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2039 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2040 "output low\n", gpio_num, gpio_shift);
2041 /* clear SET and set CLR */
2042 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2043 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2044 break;
2045
2046 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2047 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2048 "output high\n", gpio_num, gpio_shift);
2049 /* clear CLR and set SET */
2050 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2051 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2052 break;
2053
2054 default:
2055 break;
2056 }
2057
2058 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2059 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2060
2061 return 0;
2062}
2063
Eliezer Tamirf1410642008-02-28 11:51:50 -08002064static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2065{
2066 u32 spio_mask = (1 << spio_num);
2067 u32 spio_reg;
2068
2069 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2070 (spio_num > MISC_REGISTERS_SPIO_7)) {
2071 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2072 return -EINVAL;
2073 }
2074
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002075 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002076 /* read SPIO and mask except the float bits */
2077 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2078
2079 switch (mode) {
Eilon Greenstein6378c022008-08-13 15:59:25 -07002080 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002081 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2082 /* clear FLOAT and set CLR */
2083 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2084 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2085 break;
2086
Eilon Greenstein6378c022008-08-13 15:59:25 -07002087 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002088 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2089 /* clear FLOAT and set SET */
2090 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2091 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2092 break;
2093
2094 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2095 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2096 /* set FLOAT */
2097 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2098 break;
2099
2100 default:
2101 break;
2102 }
2103
2104 REG_WR(bp, MISC_REG_SPIO, spio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002105 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002106
2107 return 0;
2108}
2109
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002110static void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002111{
Eilon Greensteinad33ea32009-01-14 21:24:57 -08002112 switch (bp->link_vars.ieee_fc &
2113 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002114 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002115 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002116 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002117 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002118
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002119 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002120 bp->port.advertising |= (ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002121 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002122 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002123
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002124 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002125 bp->port.advertising |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08002126 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002127
Eliezer Tamirf1410642008-02-28 11:51:50 -08002128 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002129 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002130 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002131 break;
2132 }
2133}
2134
/* Report the current link state to the log and sync the netdev carrier
 * flag with it.  In multi-function mode the reported speed is capped by
 * this function's configured max bandwidth.
 */
static void bnx2x_link_report(struct bnx2x *bp)
{
	/* Function administratively disabled by the MCP - force "down" */
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		line_speed = bp->link_vars.line_speed;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			/* mf_config holds max BW in units of 100 Mbps */
			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			/* never report more than this VN's BW allocation */
			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
		printk("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
2185
/* First-time PHY/link bring-up during load.  Selects the requested flow
 * control based on MTU, optionally arms loopback for diagnostics mode and
 * runs the PHY init under the PHY lock.  Returns the bnx2x_phy_init()
 * status, or -EINVAL when there is no bootcode (MCP) to drive the link.
 * NOTE(review): return type is u8 so -EINVAL is truncated to a nonzero
 * byte - callers presumably only test for nonzero; confirm before relying
 * on the exact value.
 */
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		/* diagnostics load runs the MAC in XGXS loopback */
		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		/* on emulation/FPGA the link may already be up here */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
2220
/* Re-run PHY initialization with the current link_params (e.g. after a
 * settings change) and refresh the advertised flow control.  Requires
 * bootcode; logs an error otherwise.
 */
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
2232
/* Reset the link (PHY) under the PHY lock.  The trailing '1' argument is
 * passed through to bnx2x_link_reset() unchanged.  Requires bootcode;
 * logs an error otherwise.
 */
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
2242
2243static u8 bnx2x_link_test(struct bnx2x *bp)
2244{
2245 u8 rc;
2246
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002247 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002248 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002249 bnx2x_release_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002250
2251 return rc;
2252}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002253
/* Initialize the per-port rate shaping and fairness contexts from the
 * current line speed.  All derived values scale with r_param, the line
 * rate in bytes per usec (line_speed is in Mbps, hence /8).
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
2288
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		/* function number = 2*vn + port on E1H */
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		/* min BW is stored in units of 100 Mbps */
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
2334
/* Program one VN's rate-shaping and fairness parameters, derived from
 * the shared-memory MF configuration, into the XSTORM internal memory.
 * A hidden function gets zero min/max (effectively no bandwidth).
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		/* BW fields are stored in units of 100 Mbps */
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	/* fairness credit only matters when some VN has a nonzero min rate */
	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory - written one dword at a time */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
2396
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002397
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002398/* This function is called upon link interrupt */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002399static void bnx2x_link_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002400{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002401 /* Make sure that we are synced with the current statistics */
2402 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2403
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002404 bnx2x_link_update(&bp->link_params, &bp->link_vars);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002405
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002406 if (bp->link_vars.link_up) {
2407
Eilon Greenstein1c063282009-02-12 08:36:43 +00002408 /* dropless flow control */
Eilon Greensteina18f5122009-08-12 08:23:26 +00002409 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
Eilon Greenstein1c063282009-02-12 08:36:43 +00002410 int port = BP_PORT(bp);
2411 u32 pause_enabled = 0;
2412
2413 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2414 pause_enabled = 1;
2415
2416 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07002417 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
Eilon Greenstein1c063282009-02-12 08:36:43 +00002418 pause_enabled);
2419 }
2420
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002421 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2422 struct host_port_stats *pstats;
2423
2424 pstats = bnx2x_sp(bp, port_stats);
2425 /* reset old bmac stats */
2426 memset(&(pstats->mac_stx[0]), 0,
2427 sizeof(struct mac_stx));
2428 }
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002429 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002430 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2431 }
2432
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002433 /* indicate link status */
2434 bnx2x_link_report(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002435
2436 if (IS_E1HMF(bp)) {
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002437 int port = BP_PORT(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002438 int func;
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002439 int vn;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002440
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002441 /* Set the attention towards other drivers on the same port */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002442 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2443 if (vn == BP_E1HVN(bp))
2444 continue;
2445
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002446 func = ((vn << 1) | port);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002447 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2448 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2449 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002450
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002451 if (bp->link_vars.link_up) {
2452 int i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002453
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002454 /* Init rate shaping and fairness contexts */
2455 bnx2x_init_port_minmax(bp);
2456
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002457 for (vn = VN_0; vn < E1HVN_MAX; vn++)
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002458 bnx2x_init_vn_minmax(bp, 2*vn + port);
2459
2460 /* Store it to internal memory */
2461 for (i = 0;
2462 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2463 REG_WR(bp, BAR_XSTRORM_INTMEM +
2464 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2465 ((u32 *)(&bp->cmng))[i]);
2466 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002467 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002468}
2469
/* Poll-path refresh of the link status (as opposed to the interrupt path
 * in bnx2x_link_attn).  No-op unless the device is open and the function
 * is not administratively disabled.
 */
static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}
2487
/* Take over the Port Management Function (PMF) role: mark this function
 * as PMF, enable NIG attention for its VN in the HC edge registers and
 * kick the statistics state machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2503
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002504/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002505
2506/* slow path */
2507
2508/*
2509 * General service functions
2510 */
2511
/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	/* sequence number distinguishes this request from stale replies */
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	/* serialize driver<->MCP mailbox accesses */
	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2550
2551static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
Michael Chane665bfd2009-10-10 13:46:54 +00002552static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002553static void bnx2x_set_rx_mode(struct net_device *dev);
2554
/* Quiesce an E1H function that the MCP disabled: stop the TX queues,
 * disable its NIG LLH and drop the carrier.  RX is stopped at the NIG
 * level rather than by closing the device.
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */

	/* stop packet delivery to/from this function in the NIG */
	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
2566
/* Re-enable an E1H function previously disabled by the MCP: re-open the
 * NIG LLH and restart the TX queues.  Carrier state is left to the link
 * reporting path.
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
2581
/* Recompute and reprogram the port's rate shaping / fairness settings
 * after a bandwidth-allocation change (DCC event).  Only the PMF writes
 * the result to XSTORM memory and notifies the sibling functions.
 */
static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory - one dword at a time */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}
2615
/* Handle a Dynamic Control Command (DCC) event from the MCP: enable or
 * disable this PF and/or apply a new bandwidth allocation, then report
 * the outcome back to the MCP.  Unhandled event bits are reported as a
 * DCC failure.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP - any bit still set means we failed it */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}
2652
Michael Chan28912902009-10-10 13:46:53 +00002653/* must be called under the spq lock */
2654static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2655{
2656 struct eth_spe *next_spe = bp->spq_prod_bd;
2657
2658 if (bp->spq_prod_bd == bp->spq_last_bd) {
2659 bp->spq_prod_bd = bp->spq;
2660 bp->spq_prod_idx = 0;
2661 DP(NETIF_MSG_TIMER, "end of spq\n");
2662 } else {
2663 bp->spq_prod_bd++;
2664 bp->spq_prod_idx++;
2665 }
2666 return next_spe;
2667}
2668
/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	/* publish the new producer index to the XSTORM firmware */
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	/* flush the posted write before the lock is dropped */
	mmiowb();
}
2681
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002682/* the slow path queue is odd since completions arrive on the fastpath ring */
2683static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2684 u32 data_hi, u32 data_lo, int common)
2685{
Michael Chan28912902009-10-10 13:46:53 +00002686 struct eth_spe *spe;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002687
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002688 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2689 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002690 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2691 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2692 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2693
2694#ifdef BNX2X_STOP_ON_ERROR
2695 if (unlikely(bp->panic))
2696 return -EIO;
2697#endif
2698
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002699 spin_lock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002700
2701 if (!bp->spq_left) {
2702 BNX2X_ERR("BUG! SPQ ring full!\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002703 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002704 bnx2x_panic();
2705 return -EBUSY;
2706 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08002707
Michael Chan28912902009-10-10 13:46:53 +00002708 spe = bnx2x_sp_get_next(bp);
2709
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002710 /* CID needs port number to be encoded int it */
Michael Chan28912902009-10-10 13:46:53 +00002711 spe->hdr.conn_and_cmd_data =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002712 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2713 HW_CID(bp, cid)));
Michael Chan28912902009-10-10 13:46:53 +00002714 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002715 if (common)
Michael Chan28912902009-10-10 13:46:53 +00002716 spe->hdr.type |=
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002717 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2718
Michael Chan28912902009-10-10 13:46:53 +00002719 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2720 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002721
2722 bp->spq_left--;
2723
Michael Chan28912902009-10-10 13:46:53 +00002724 bnx2x_sp_prod_update(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002725 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002726 return 0;
2727}
2728
/* acquire split MCP access lock register */
/* Spins up to ~5s (1000 * 5ms) setting bit 31 of the GRC lock register
 * and re-reading it until the hardware reports the lock as held.
 * Returns 0 on success, -EBUSY on timeout.
 */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		/* lock is granted when the bit reads back set */
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}
2753
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002754/* release split MCP access lock register */
2755static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002756{
2757 u32 val = 0;
2758
2759 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2760}
2761
/* Compare the cached default status block indices against the ones the
 * chip last wrote and cache the new values.  Returns a bitmask of which
 * sections changed: 1=attn, 2=cstorm, 4=ustorm, 8=xstorm, 16=tstorm.
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
2790
2791/*
2792 * slow path service functions
2793 */
2794
/* Handle newly-asserted attention bits: mask them in the AEU, record them
 * in attn_state, service the hard-wired sources (NIG/link, GPIOs, general
 * attentions) and finally acknowledge them to the HC.  For a NIG (link)
 * attention the NIG interrupt is masked around bnx2x_link_attn() and
 * restored afterwards under the PHY lock.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	/* a bit asserted twice without a deassert in between is a bug */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* mask the newly asserted bits in the AEU, under the HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1;
		 * clear the latched bit after logging */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2890
/* Record a fan failure in shared memory (so it survives driver reload)
 * and log a shutdown warning for the user.
 */
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage.  Please contact Dell Support for assistance\n",
	       bp->dev->name);
}

/*
 * bnx2x_attn_int_deasserted0 - service group-0 deasserted attentions.
 *
 * Handles: SPIO5 (fan failure - masks the bit in the AEU enable register,
 * powers down the external PHY per its type, then records the failure),
 * GPIO3 module-detect interrupts (under the PHY lock), and fatal HW-block
 * attentions in SET_0 (masked and then the driver panics).
 */
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002908static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2909{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002910	int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002911	int reg_offset;
Eilon Greenstein4d295db2009-07-21 05:47:47 +00002912	u32 val, swap_val, swap_override;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002913
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002914	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2915			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002916
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002917	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002918
		/* mask SPIO5 in the AEU so the attention does not refire */
2919		val = REG_RD(bp, reg_offset);
2920		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2921		REG_WR(bp, reg_offset, val);
2922
2923		BNX2X_ERR("SPIO5 hw attention\n");
2924
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002925		/* Fan failure attention */
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00002926		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2927		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
Eilon Greenstein17de50b2008-08-13 15:56:59 -07002928			/* Low power mode is controlled by GPIO 2 */
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002929			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
Eilon Greenstein17de50b2008-08-13 15:56:59 -07002930				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002931			/* The PHY reset is controlled by GPIO 1 */
2932			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2933				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002934			break;
2935
Eilon Greenstein4d295db2009-07-21 05:47:47 +00002936		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2937			/* The PHY reset is controlled by GPIO 1 */
2938			/* fake the port number to cancel the swap done in
2939			   set_gpio() */
2940			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2941			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2942			port = (swap_val && swap_override) ^ 1;
2943			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2944				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2945			break;
2946
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002947		default:
2948			break;
2949		}
2950		bnx2x_fan_failure(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002951	}
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002952
	/* SFP+ module plug/unplug detection, serialized with link code */
Eilon Greenstein589abe32009-02-12 08:36:55 +00002953	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2954		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2955		bnx2x_acquire_phy_lock(bp);
2956		bnx2x_handle_module_detect_int(&bp->link_params);
2957		bnx2x_release_phy_lock(bp);
2958	}
2959
	/* fatal HW-block attentions: mask them and stop the driver */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002960	if (attn & HW_INTERRUT_ASSERT_SET_0) {
2961
2962		val = REG_RD(bp, reg_offset);
2963		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2964		REG_WR(bp, reg_offset, val);
2965
2966		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002967			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002968		bnx2x_panic();
2969	}
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002970}
2971
/*
 * bnx2x_attn_int_deasserted1 - service group-1 deasserted attentions.
 *
 * Reports doorbell-queue (DORQ) HW interrupts (bit 0x2 = DORQ discard,
 * logged as fatal) and panics on fatal HW-block attentions in SET_1
 * after masking them in the per-port AEU enable register.
 */
2972static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2973{
2974	u32 val;
2975
Eilon Greenstein0626b892009-02-12 08:38:14 +00002976	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002977
		/* reading the STS_CLR register also clears the interrupt */
2978		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2979		BNX2X_ERR("DB hw attention 0x%x\n", val);
2980		/* DORQ discard attention */
2981		if (val & 0x2)
2982			BNX2X_ERR("FATAL error from DORQ\n");
2983	}
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002984
2985	if (attn & HW_INTERRUT_ASSERT_SET_1) {
2986
2987		int port = BP_PORT(bp);
2988		int reg_offset;
2989
2990		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2991				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2992
		/* mask the offending bits, then give up - state is fatal */
2993		val = REG_RD(bp, reg_offset);
2994		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2995		REG_WR(bp, reg_offset, val);
2996
2997		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002998			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002999		bnx2x_panic();
3000	}
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003001}
3002
/*
 * bnx2x_attn_int_deasserted2 - service group-2 deasserted attentions.
 *
 * Reports CFC HW interrupts (bit 0x2 = CFC error, fatal) and PXP HW
 * interrupts (bits 0x18000 = RQ_USDMDP_FIFO_OVERFLOW, fatal), then
 * panics on fatal HW-block attentions in SET_2 after masking them.
 */
3003static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3004{
3005	u32 val;
3006
3007	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3008
		/* reading the STS_CLR register also clears the interrupt */
3009		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3010		BNX2X_ERR("CFC hw attention 0x%x\n", val);
3011		/* CFC error attention */
3012		if (val & 0x2)
3013			BNX2X_ERR("FATAL error from CFC\n");
3014	}
3015
3016	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3017
3018		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3019		BNX2X_ERR("PXP hw attention 0x%x\n", val);
3020		/* RQ_USDMDP_FIFO_OVERFLOW */
3021		if (val & 0x18000)
3022			BNX2X_ERR("FATAL error from PXP\n");
3023	}
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003024
3025	if (attn & HW_INTERRUT_ASSERT_SET_2) {
3026
3027		int port = BP_PORT(bp);
3028		int reg_offset;
3029
3030		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3031				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3032
		/* mask the offending bits, then give up - state is fatal */
3033		val = REG_RD(bp, reg_offset);
3034		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3035		REG_WR(bp, reg_offset, val);
3036
3037		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00003038			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003039		bnx2x_panic();
3040	}
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003041}
3042
/*
 * bnx2x_attn_int_deasserted3 - service group-3 deasserted attentions.
 *
 * Handles the general-attention bits: the per-function PMF link assert
 * (re-reads the MF config and driver status from shmem, dispatches DCC
 * events, refreshes link status and possibly takes over as PMF),
 * microcode (MC) asserts and MCP asserts (both fatal), and the latched
 * attentions (GRC timeout / GRC reserved on E1H), which are cleared via
 * MISC_REG_AEU_CLR_LATCH_SIGNAL.
 */
3043static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3044{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003045	u32 val;
3046
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003047	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3048
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003049		if (attn & BNX2X_PMF_LINK_ASSERT) {
3050			int func = BP_FUNC(bp);
3051
			/* ack the attention, then pick up the new state
			   the MCP published in shared memory */
3052			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07003053			bp->mf_config = SHMEM_RD(bp,
3054					mf_cfg.func_mf_config[func].config);
Eilon Greenstein2691d512009-08-12 08:22:08 +00003055			val = SHMEM_RD(bp, func_mb[func].drv_status);
3056			if (val & DRV_STATUS_DCC_EVENT_MASK)
3057				bnx2x_dcc_event(bp,
3058					    (val & DRV_STATUS_DCC_EVENT_MASK));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003059			bnx2x__link_status_update(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00003060			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003061				bnx2x_pmf_update(bp);
3062
3063		} else if (attn & BNX2X_MC_ASSERT_BITS) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003064
3065			BNX2X_ERR("MC assert!\n");
3066			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3067			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3068			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3069			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3070			bnx2x_panic();
3071
3072		} else if (attn & BNX2X_MCP_ASSERT) {
3073
3074			BNX2X_ERR("MCP assert!\n");
3075			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003076			bnx2x_fw_dump(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003077
3078		} else
3079			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3080	}
3081
3082	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003083		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3084		if (attn & BNX2X_GRC_TIMEOUT) {
			/* the detail registers exist only on E1H */
3085			val = CHIP_IS_E1H(bp) ?
3086				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3087			BNX2X_ERR("GRC time-out 0x%08x\n", val);
3088		}
3089		if (attn & BNX2X_GRC_RSV) {
3090			val = CHIP_IS_E1H(bp) ?
3091				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3092			BNX2X_ERR("GRC reserved 0x%08x\n", val);
3093		}
		/* clear all latched attention signals */
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003094		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003095	}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003096}
3097
/*
 * bnx2x_attn_int_deasserted - process all newly deasserted attention bits.
 *
 * Takes the ALR hardware lock (MCP or the other port may race on the same
 * event), snapshots the four AEU after-invert signal registers, and for
 * every dynamic attention group named in @deasserted dispatches the
 * per-group handlers (deasserted0..3) with the group-masked signals.
 * Afterwards it clears the attention bits in the HC, re-enables the
 * deasserted bits in the per-port AEU mask (under the port attn-mask HW
 * lock) and updates the driver's cached attn_state.
 */
3098static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3099{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003100	struct attn_route attn;
3101	struct attn_route group_mask;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003102	int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003103	int index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003104	u32 reg_addr;
3105	u32 val;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003106	u32 aeu_mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003107
3108	/* need to take HW lock because MCP or other port might also
3109	   try to handle this event */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07003110	bnx2x_acquire_alr(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003111
3112	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3113	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3114	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3115	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003116	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3117	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003118
3119	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3120		if (deasserted & (1 << index)) {
3121			group_mask = bp->attn_group[index];
3122
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003123			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3124			   index, group_mask.sig[0], group_mask.sig[1],
3125			   group_mask.sig[2], group_mask.sig[3]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003126
			/* deasserted3 runs first: it may pick up PMF duty
			   needed by the other handlers */
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003127			bnx2x_attn_int_deasserted3(bp,
3128					attn.sig[3] & group_mask.sig[3]);
3129			bnx2x_attn_int_deasserted1(bp,
3130					attn.sig[1] & group_mask.sig[1]);
3131			bnx2x_attn_int_deasserted2(bp,
3132					attn.sig[2] & group_mask.sig[2]);
3133			bnx2x_attn_int_deasserted0(bp,
3134					attn.sig[0] & group_mask.sig[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003135
3136			if ((attn.sig[0] & group_mask.sig[0] &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003137						HW_PRTY_ASSERT_SET_0) ||
3138			    (attn.sig[1] & group_mask.sig[1] &
3139						HW_PRTY_ASSERT_SET_1) ||
3140			    (attn.sig[2] & group_mask.sig[2] &
3141						HW_PRTY_ASSERT_SET_2))
Eilon Greenstein6378c022008-08-13 15:59:25 -07003142				BNX2X_ERR("FATAL HW block parity attention\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003143		}
3144	}
3145
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07003146	bnx2x_release_alr(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003147
	/* clear the handled attention bits in the HC */
Eilon Greenstein5c862842008-08-13 15:51:48 -07003148	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003149
3150	val = ~deasserted;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003151	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3152	   val, reg_addr);
Eilon Greenstein5c862842008-08-13 15:51:48 -07003153	REG_WR(bp, reg_addr, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003154
	/* every deasserted bit should have been asserted before */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003155	if (~bp->attn_state & deasserted)
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003156		BNX2X_ERR("IGU ERROR\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003157
3158	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3159			  MISC_REG_AEU_MASK_ATTN_FUNC_0;
3160
	/* re-enable the deasserted lines in the AEU mask under HW lock */
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003161	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3162	aeu_mask = REG_RD(bp, reg_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003163
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003164	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3165	   aeu_mask, deasserted);
3166	aeu_mask |= (deasserted & 0xff);
3167	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3168
3169	REG_WR(bp, reg_addr, aeu_mask);
3170	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003171
3172	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3173	bp->attn_state &= ~deasserted;
3174	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3175}
3176
/*
 * bnx2x_attn_int - derive attention transitions and dispatch them.
 *
 * Compares the attention bits/acks published in the default status block
 * against the driver's cached attn_state:
 *   asserted   = set in HW, not yet acked, not yet tracked;
 *   deasserted = cleared in HW, still acked, still tracked.
 * Logs an error if bits/ack/state disagree in an impossible way, then
 * hands each transition set to the corresponding handler.
 */
3177static void bnx2x_attn_int(struct bnx2x *bp)
3178{
3179	/* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003180	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3181								attn_bits);
3182	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3183								attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003184	u32 attn_state = bp->attn_state;
3185
3186	/* look for changed bits */
3187	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3188	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3189
3190	DP(NETIF_MSG_HW,
3191	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3192	   attn_bits, attn_ack, asserted, deasserted);
3193
3194	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003195		BNX2X_ERR("BAD attention  state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003196
3197	/* handle bits that were raised */
3198	if (asserted)
3199		bnx2x_attn_int_asserted(bp, asserted);
3200
3201	if (deasserted)
3202		bnx2x_attn_int_deasserted(bp, deasserted);
3203}
3204
/*
 * bnx2x_sp_task - slowpath (delayed) work handler.
 *
 * Scheduled from the slowpath ISR. Bails out while interrupts are
 * disabled (intr_sem held), refreshes the default status block indices,
 * services HW attentions if the attention bit is set, then acks every
 * default-SB storm index back to the IGU, re-enabling interrupts on the
 * final (TSTORM) ack only.
 */
3205static void bnx2x_sp_task(struct work_struct *work)
3206{
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08003207	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003208	u16 status;
3209
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003210
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003211	/* Return here if interrupt is disabled */
3212	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07003213		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003214		return;
3215	}
3216
3217	status = bnx2x_update_dsb_idx(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003218/*	if (status == 0)				     */
3219/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003220
Eilon Greenstein3196a882008-08-13 15:58:49 -07003221	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003222
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003223	/* HW attentions */
3224	if (status & 0x1)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003225		bnx2x_attn_int(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003226
	/* ack each storm index; only the last ack re-enables the IGU line */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003227	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003228		     IGU_INT_NOP, 1);
3229	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3230		     IGU_INT_NOP, 1);
3231	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3232		     IGU_INT_NOP, 1);
3233	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3234		     IGU_INT_NOP, 1);
3235	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3236		     IGU_INT_ENABLE, 1);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003237
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003238}
3239
/*
 * bnx2x_msix_sp_int - slowpath MSI-X interrupt handler.
 *
 * Ignores the interrupt while intr_sem is held, acks the IGU with
 * IGU_INT_DISABLE so no further slowpath interrupts fire until
 * bnx2x_sp_task() re-enables them, forwards the event to a registered
 * CNIC handler (when BCM_CNIC is built in), and queues the slowpath
 * work item for process context.
 */
3240static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3241{
3242	struct net_device *dev = dev_instance;
3243	struct bnx2x *bp = netdev_priv(dev);
3244
3245	/* Return here if interrupt is disabled */
3246	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07003247		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003248		return IRQ_HANDLED;
3249	}
3250
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08003251	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003252
3253#ifdef BNX2X_STOP_ON_ERROR
3254	if (unlikely(bp->panic))
3255		return IRQ_HANDLED;
3256#endif
3257
Michael Chan993ac7b2009-10-10 13:46:56 +00003258#ifdef BCM_CNIC
3259	{
		/* give the CNIC (iSCSI offload) driver a chance to handle
		   the event; protected by RCU against unregistration */
3260		struct cnic_ops *c_ops;
3261
3262		rcu_read_lock();
3263		c_ops = rcu_dereference(bp->cnic_ops);
3264		if (c_ops)
3265			c_ops->cnic_handler(bp->cnic_data, NULL);
3266		rcu_read_unlock();
3267	}
3268#endif
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08003269	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003270
3271	return IRQ_HANDLED;
3272}
3273
3274/* end of slow path */
3275
3276/* Statistics */
3277
3278/****************************************************************************
3279* Macros
3280****************************************************************************/
3281
/*
 * Statistics helper macros. 64-bit counters are kept as {hi, lo} u32
 * pairs (hardware/firmware layout), so arithmetic is done with explicit
 * carry/borrow propagation between the halves.
 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003282/* sum[hi:lo] += add[hi:lo] */
3283#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3284	do { \
3285		s_lo += a_lo; \
Eilon Greensteinf5ba6772009-01-14 21:29:18 -08003286		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003287	} while (0)
3288
/* 64-bit subtraction on split halves; clamps to 0 on underflow */
3289/* difference = minuend - subtrahend */
3290#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3291	do { \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003292		if (m_lo < s_lo) { \
3293			/* underflow */ \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003294			d_hi = m_hi - s_hi; \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003295			if (d_hi > 0) { \
Eilon Greenstein6378c022008-08-13 15:59:25 -07003296				/* we can 'loan' 1 */ \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003297				d_hi--; \
3298				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003299			} else { \
Eilon Greenstein6378c022008-08-13 15:59:25 -07003300				/* m_hi <= s_hi */ \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003301				d_hi = 0; \
3302				d_lo = 0; \
3303			} \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003304		} else { \
3305			/* m_lo >= s_lo */ \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003306			if (m_hi < s_hi) { \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003307				d_hi = 0; \
3308				d_lo = 0; \
3309			} else { \
Eilon Greenstein6378c022008-08-13 15:59:25 -07003310				/* m_hi >= s_hi */ \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003311				d_hi = m_hi - s_hi; \
3312				d_lo = m_lo - s_lo; \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003313			} \
3314		} \
3315	} while (0)
3316
/* fold a new 64-bit MAC reading into mac_stx[0] (last) / mac_stx[1] (total) */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003317#define UPDATE_STAT64(s, t) \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003318	do { \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003319		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3320			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3321		pstats->mac_stx[0].t##_hi = new->s##_hi; \
3322		pstats->mac_stx[0].t##_lo = new->s##_lo; \
3323		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3324		       pstats->mac_stx[1].t##_lo, diff.lo); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003325	} while (0)
3326
/* same, but accumulating a NIG counter delta into the eth stats */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003327#define UPDATE_STAT64_NIG(s, t) \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003328	do { \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003329		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3330			diff.lo, new->s##_lo, old->s##_lo); \
3331		ADD_64(estats->t##_hi, diff.hi, \
3332		       estats->t##_lo, diff.lo); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003333	} while (0)
3334
3335/* sum[hi:lo] += add */
3336#define ADD_EXTEND_64(s_hi, s_lo, a) \
3337	do { \
3338		s_lo += a; \
3339		s_hi += (s_lo < a) ? 1 : 0; \
3340	} while (0)
3341
/* extend a 32-bit HW counter into the 64-bit accumulated MAC stats */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003342#define UPDATE_EXTEND_STAT(s) \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003343	do { \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003344		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3345			      pstats->mac_stx[1].s##_lo, \
3346			      new->s); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003347	} while (0)
3348
/* per-storm variants: take the little-endian delta since the last
   snapshot, remember the new raw value, and extend into queue stats */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003349#define UPDATE_EXTEND_TSTAT(s, t) \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003350	do { \
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00003351		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3352		old_tclient->s = tclient->s; \
Eilon Greensteinde832a52009-02-12 08:36:33 +00003353		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3354	} while (0)
3355
3356#define UPDATE_EXTEND_USTAT(s, t) \
3357	do { \
3358		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3359		old_uclient->s = uclient->s; \
3360		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003361	} while (0)
3362
3363#define UPDATE_EXTEND_XSTAT(s, t) \
3364	do { \
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00003365		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3366		old_xclient->s = xclient->s; \
Eilon Greensteinde832a52009-02-12 08:36:33 +00003367		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3368	} while (0)
3369
3370/* minuend -= subtrahend */
3371#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3372	do { \
3373		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3374	} while (0)
3375
3376/* minuend[hi:lo] -= subtrahend */
3377#define SUB_EXTEND_64(m_hi, m_lo, s) \
3378	do { \
3379		SUB_64(m_hi, 0, m_lo, s); \
3380	} while (0)
3381
3382#define SUB_EXTEND_USTAT(s, t) \
3383	do { \
3384		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3385		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003386	} while (0)
3387
3388/*
3389 * General service functions
3390 */
3391
/*
 * bnx2x_hilo - collapse a {hi, lo} u32 counter pair into a long.
 *
 * @hiref points at the high word; the low word follows it in memory.
 * On 64-bit longs the full value is returned; on 32-bit only the low
 * word fits, so the high word is dropped.
 */
3392static inline long bnx2x_hilo(u32 *hiref)
3393{
3394	u32 lo = *(hiref + 1);
3395#if (BITS_PER_LONG == 64)
3396	u32 hi = *hiref;
3397
3398	return HILO_U64(hi, lo);
3399#else
3400	return lo;
3401#endif
3402}
3403
3404/*
3405 * Init service functions
3406 */
3407
/*
 * bnx2x_storm_stats_post - request a statistics snapshot from the storms.
 *
 * Posts a STAT_QUERY ramrod (tagged with an incrementing drv_counter and
 * a bitmap of all client IDs) unless one is already outstanding. The
 * stats ramrod has a dedicated slot on the slowpath queue, so the spq
 * credit consumed by bnx2x_sp_post() is returned on success.
 */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003408static void bnx2x_storm_stats_post(struct bnx2x *bp)
3409{
3410	if (!bp->stats_pending) {
3411		struct eth_query_ramrod_data ramrod_data = {0};
Eilon Greensteinde832a52009-02-12 08:36:33 +00003412		int i, rc;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003413
3414		ramrod_data.drv_counter = bp->stats_counter++;
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08003415		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
Eilon Greensteinde832a52009-02-12 08:36:33 +00003416		for_each_queue(bp, i)
3417			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003418
3419		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3420				   ((u32 *)&ramrod_data)[1],
3421				   ((u32 *)&ramrod_data)[0], 0);
3422		if (rc == 0) {
3423			/* stats ramrod has it's own slot on the spq */
3424			bp->spq_left++;
3425			bp->stats_pending = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003426		}
3427	}
3428}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003429
/*
 * bnx2x_hw_stats_post - kick off the queued DMAE statistics transfers.
 *
 * If DMAE commands were staged (executer_idx != 0) a "loader" command is
 * built that copies the first staged command into the DMAE command
 * memory and chains the rest via the per-loader-channel GO registers;
 * completion is signalled by writing DMAE_COMP_VAL into stats_comp.
 * With nothing staged but a function stats address present, the single
 * pre-built stats_dmae command is posted directly. No-op on emulation/
 * FPGA (CHIP_REV_IS_SLOW).
 */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003430static void bnx2x_hw_stats_post(struct bnx2x *bp)
3431{
3432	struct dmae_command *dmae = &bp->stats_dmae;
3433	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3434
3435	*stats_comp = DMAE_COMP_VAL;
Eilon Greensteinde832a52009-02-12 08:36:33 +00003436	if (CHIP_REV_IS_SLOW(bp))
3437		return;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003438
3439	/* loader */
3440	if (bp->executer_idx) {
3441		int loader_idx = PMF_DMAE_C(bp);
3442
3443		memset(dmae, 0, sizeof(struct dmae_command));
3444
3445		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3446				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3447				DMAE_CMD_DST_RESET |
3448#ifdef __BIG_ENDIAN
3449				DMAE_CMD_ENDIANITY_B_DW_SWAP |
3450#else
3451				DMAE_CMD_ENDIANITY_DW_SWAP |
3452#endif
3453				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3454					       DMAE_CMD_PORT_0) |
3455				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		/* copy the first staged command into DMAE command memory;
		   its completion triggers the next GO register in turn */
3456		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3457		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3458		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3459				     sizeof(struct dmae_command) *
3460				     (loader_idx + 1)) >> 2;
3461		dmae->dst_addr_hi = 0;
3462		dmae->len = sizeof(struct dmae_command) >> 2;
3463		if (CHIP_IS_E1(bp))
3464			dmae->len--;
3465		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3466		dmae->comp_addr_hi = 0;
3467		dmae->comp_val = 1;
3468
3469		*stats_comp = 0;
3470		bnx2x_post_dmae(bp, dmae, loader_idx);
3471
3472	} else if (bp->func_stx) {
3473		*stats_comp = 0;
3474		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3475	}
3476}
3477
/*
 * bnx2x_stats_comp - wait for the outstanding DMAE stats transfer.
 *
 * Polls stats_comp for DMAE_COMP_VAL, sleeping 1ms per iteration for up
 * to 10 iterations; logs an error on timeout but always returns 1.
 * Must be called from sleepable context (might_sleep).
 */
3478static int bnx2x_stats_comp(struct bnx2x *bp)
3479{
3480	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3481	int cnt = 10;
3482
3483	might_sleep();
3484	while (*stats_comp != DMAE_COMP_VAL) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003485		if (!cnt) {
3486			BNX2X_ERR("timeout waiting for stats finished\n");
3487			break;
3488		}
3489		cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -07003490		msleep(1);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003491	}
3492	return 1;
3493}
3494
3495/*
3496 * Statistics service functions
3497 */
3498
/*
 * bnx2x_stats_pmf_update - import port statistics after a PMF change.
 *
 * When this function becomes the port management function (E1H
 * multi-function only), the accumulated port statistics live in shared
 * memory at port_stx. Two chained GRC->PCI DMAE commands (the struct is
 * larger than one DMAE transfer, DMAE_LEN32_RD_MAX words each) copy them
 * into the local port_stats buffer, then the transfer is posted and
 * waited on synchronously.
 */
3499static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3500{
3501	struct dmae_command *dmae;
3502	u32 opcode;
3503	int loader_idx = PMF_DMAE_C(bp);
3504	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3505
3506	/* sanity */
3507	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3508		BNX2X_ERR("BUG!\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003509		return;
3510	}
3511
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003512	bp->executer_idx = 0;
3513
3514	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3515		  DMAE_CMD_C_ENABLE |
3516		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3517#ifdef __BIG_ENDIAN
3518		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
3519#else
3520		  DMAE_CMD_ENDIANITY_DW_SWAP |
3521#endif
3522		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3523		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3524
	/* first chunk: completion chains to the loader GO register */
3525	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3526	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3527	dmae->src_addr_lo = bp->port.port_stx >> 2;
3528	dmae->src_addr_hi = 0;
3529	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3530	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3531	dmae->len = DMAE_LEN32_RD_MAX;
3532	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3533	dmae->comp_addr_hi = 0;
3534	dmae->comp_val = 1;
3535
	/* second chunk: remainder of host_port_stats, completion to PCI */
3536	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3537	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3538	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3539	dmae->src_addr_hi = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07003540	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3541				   DMAE_LEN32_RD_MAX * 4);
3542	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3543				   DMAE_LEN32_RD_MAX * 4);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003544	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3545	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3546	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3547	dmae->comp_val = DMAE_COMP_VAL;
3548
3549	*stats_comp = 0;
3550	bnx2x_hw_stats_post(bp);
3551	bnx2x_stats_comp(bp);
3552}
3553
3554static void bnx2x_port_stats_init(struct bnx2x *bp)
3555{
3556 struct dmae_command *dmae;
3557 int port = BP_PORT(bp);
3558 int vn = BP_E1HVN(bp);
3559 u32 opcode;
3560 int loader_idx = PMF_DMAE_C(bp);
3561 u32 mac_addr;
3562 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3563
3564 /* sanity */
3565 if (!bp->link_vars.link_up || !bp->port.pmf) {
3566 BNX2X_ERR("BUG!\n");
3567 return;
3568 }
3569
3570 bp->executer_idx = 0;
3571
3572 /* MCP */
3573 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3574 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3575 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3576#ifdef __BIG_ENDIAN
3577 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3578#else
3579 DMAE_CMD_ENDIANITY_DW_SWAP |
3580#endif
3581 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3582 (vn << DMAE_CMD_E1HVN_SHIFT));
3583
3584 if (bp->port.port_stx) {
3585
3586 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3587 dmae->opcode = opcode;
3588 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3589 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3590 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3591 dmae->dst_addr_hi = 0;
3592 dmae->len = sizeof(struct host_port_stats) >> 2;
3593 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3594 dmae->comp_addr_hi = 0;
3595 dmae->comp_val = 1;
3596 }
3597
3598 if (bp->func_stx) {
3599
3600 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3601 dmae->opcode = opcode;
3602 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3603 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3604 dmae->dst_addr_lo = bp->func_stx >> 2;
3605 dmae->dst_addr_hi = 0;
3606 dmae->len = sizeof(struct host_func_stats) >> 2;
3607 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3608 dmae->comp_addr_hi = 0;
3609 dmae->comp_val = 1;
3610 }
3611
3612 /* MAC */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003613 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3614 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3615 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3616#ifdef __BIG_ENDIAN
3617 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3618#else
3619 DMAE_CMD_ENDIANITY_DW_SWAP |
3620#endif
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003621 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3622 (vn << DMAE_CMD_E1HVN_SHIFT));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003623
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07003624 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003625
3626 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3627 NIG_REG_INGRESS_BMAC0_MEM);
3628
3629 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3630 BIGMAC_REGISTER_TX_STAT_GTBYT */
3631 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3632 dmae->opcode = opcode;
3633 dmae->src_addr_lo = (mac_addr +
3634 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3635 dmae->src_addr_hi = 0;
3636 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3637 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3638 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3639 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3640 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3641 dmae->comp_addr_hi = 0;
3642 dmae->comp_val = 1;
3643
3644 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3645 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3646 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3647 dmae->opcode = opcode;
3648 dmae->src_addr_lo = (mac_addr +
3649 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3650 dmae->src_addr_hi = 0;
3651 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003652 offsetof(struct bmac_stats, rx_stat_gr64_lo));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003653 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003654 offsetof(struct bmac_stats, rx_stat_gr64_lo));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003655 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3656 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3657 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3658 dmae->comp_addr_hi = 0;
3659 dmae->comp_val = 1;
3660
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07003661 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003662
3663 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3664
3665 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3666 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3667 dmae->opcode = opcode;
3668 dmae->src_addr_lo = (mac_addr +
3669 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3670 dmae->src_addr_hi = 0;
3671 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3672 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3673 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3674 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3675 dmae->comp_addr_hi = 0;
3676 dmae->comp_val = 1;
3677
3678 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3679 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3680 dmae->opcode = opcode;
3681 dmae->src_addr_lo = (mac_addr +
3682 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3683 dmae->src_addr_hi = 0;
3684 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003685 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003686 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003687 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003688 dmae->len = 1;
3689 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3690 dmae->comp_addr_hi = 0;
3691 dmae->comp_val = 1;
3692
3693 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3694 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3695 dmae->opcode = opcode;
3696 dmae->src_addr_lo = (mac_addr +
3697 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3698 dmae->src_addr_hi = 0;
3699 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003700 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003701 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003702 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003703 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3704 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3705 dmae->comp_addr_hi = 0;
3706 dmae->comp_val = 1;
3707 }
3708
3709 /* NIG */
3710 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003711 dmae->opcode = opcode;
3712 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3713 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3714 dmae->src_addr_hi = 0;
3715 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3716 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3717 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3718 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3719 dmae->comp_addr_hi = 0;
3720 dmae->comp_val = 1;
3721
3722 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3723 dmae->opcode = opcode;
3724 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3725 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3726 dmae->src_addr_hi = 0;
3727 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3728 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3729 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3730 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3731 dmae->len = (2*sizeof(u32)) >> 2;
3732 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3733 dmae->comp_addr_hi = 0;
3734 dmae->comp_val = 1;
3735
3736 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003737 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3738 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3739 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3740#ifdef __BIG_ENDIAN
3741 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3742#else
3743 DMAE_CMD_ENDIANITY_DW_SWAP |
3744#endif
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003745 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3746 (vn << DMAE_CMD_E1HVN_SHIFT));
3747 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3748 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003749 dmae->src_addr_hi = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003750 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3751 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3752 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3753 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3754 dmae->len = (2*sizeof(u32)) >> 2;
3755 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3756 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3757 dmae->comp_val = DMAE_COMP_VAL;
3758
3759 *stats_comp = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003760}
3761
/* Program the single DMAE command that copies this function's statistics
 * block from host memory (func_stats) into device memory at bp->func_stx
 * (direction per DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC).  Completion is
 * signalled by the device writing DMAE_COMP_VAL to stats_comp in host
 * memory, which callers later poll; *stats_comp is cleared here so a
 * stale completion value cannot be mistaken for a fresh one.
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* endianness swap mode is selected at compile time to match the host */
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	/* device-side addresses and lengths are in 32-bit words, hence >> 2 */
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
3797
/* Begin a statistics collection cycle: rebuild the DMAE programs
 * (full port statistics when this function is the PMF, otherwise only
 * the per-function block when one is assigned), then post the hardware
 * DMAE transfer and the storm (firmware) statistics ramrod.
 */
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
3809
/* Statistics state-machine action for when this function becomes the PMF:
 * wait for any in-flight DMAE completion, pull the port statistics left
 * by the previous PMF, then start a fresh collection cycle.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
3816
/* Restart statistics collection: drain the pending DMAE completion,
 * then kick off a new cycle (DMAE reprogram + hw/storm posts).
 */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003822
/* Fold the freshly DMAE'd BigMAC hardware counters ("new", in mac_stats)
 * into the accumulated host_port_stats and derive the driver-level pause
 * counters in eth_stats.
 *
 * NOTE: the UPDATE_STAT64 macro refers to the locals 'new', 'pstats' and
 * 'diff' by name; the anonymous 'diff' struct below is scratch space
 * consumed inside the macro expansion even though it looks unused here.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	/* map each BigMAC register counter onto its generic mac_stx name */
	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	/* grxpf feeds two accumulators: xoff-state and the bmac xpf counter */
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	/* pause frames: mac_stx[1] holds the accumulated values */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
3873
/* Fold the freshly DMAE'd EMAC hardware counters ("new", in mac_stats)
 * into the accumulated host_port_stats and derive the driver-level pause
 * counters.  Unlike the BigMAC path, pause frames are split into xon/xoff
 * counters which are summed here with ADD_64.
 *
 * NOTE: UPDATE_EXTEND_STAT refers to the locals 'new' and 'pstats' by
 * name inside its expansion.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause received = xon + xoff pause frames (64-bit add) */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause sent = xon + xoff sent */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
3930
/* Process the hardware (MAC + NIG) statistics delivered by the last DMAE
 * cycle: dispatch to the active MAC's update routine, accumulate NIG BRB
 * drop/truncate deltas, snapshot the NIG counters for the next delta, and
 * publish the port statistics into eth_stats.
 *
 * Returns 0 on success, -1 if no MAC is active (should not happen while
 * stats are being DMAE'd).
 *
 * NOTE: 'diff' is scratch referenced inside the UPDATE_STAT64_NIG macro
 * expansion; the delta ADD_EXTEND_64 calls must run before the
 * memcpy(old, new, ...) snapshot or deltas would be lost.
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	/* extend the 32-bit NIG deltas into 64-bit accumulators */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	/* snapshot for next cycle's delta computation */
	memcpy(old, new, sizeof(struct nig_stats));

	/* publish the accumulated MAC block into the driver's eth_stats */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	/* mark the port stats block consistent (start == end) */
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	/* report a new NIG timer max only when it changed */
	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
3980
/* Aggregate the per-queue statistics reported by the firmware "storms"
 * (tstorm = rx, ustorm = rx no-buffer accounting, xstorm = tx) into the
 * per-queue eth_q_stats, the per-function func_stats and the driver's
 * global eth_stats.
 *
 * Each storm stamps its block with a stats_counter; a block whose counter
 * does not match bp->stats_counter has not been refreshed for this cycle,
 * so the whole update is abandoned (negative return) and retried later.
 *
 * Returns 0 on success, -1/-2/-4 when the x/t/u-storm block respectively
 * is stale.
 *
 * NOTE: the UPDATE_EXTEND_* / SUB_EXTEND_* macros reference the loop
 * locals (tclient/old_tclient/uclient/old_uclient/xclient/old_xclient/
 * qstats/diff) by name inside their expansions.
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	/* restart function accumulation from the saved base (skip the
	   start/end consistency markers: the trailing 2*sizeof(u32)) */
	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		/* rx bytes = broadcast + multicast + unicast bytes */
		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		/* valid bytes captured before error bytes are folded in */
		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		/* packets dropped for lack of buffers were counted as
		   received by tstorm above; subtract them back out and
		   account them as no-buffer discards instead */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* tx bytes = unicast + multicast + broadcast bytes */
		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		/* roll this queue's totals into the per-function totals */
		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	/* bad octets seen by the MAC also count as received bytes */
	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* publish the function totals at the head of eth_stats (again
	   skipping the start/end consistency markers) */
	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* port-wide discard counters are only maintained by the PMF */
	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	/* mark the function stats block consistent (start == end) */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
4195
/* Translate the driver's accumulated eth_stats into the standard Linux
 * struct net_device_stats exposed through bp->dev->stats, collapsing
 * each 64-bit hi/lo pair with bnx2x_hilo().
 */
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	/* rx drops = MAC discards plus per-queue checksum discards */
	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	/* length errors cover both undersize and oversize frames */
	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
4261
/* Recompute the driver-maintained (software) counters in eth_stats as
 * fresh sums of the per-queue values; each total is reset first so
 * repeated calls do not double-count.
 */
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}
4280
/* Main per-cycle statistics update: runs once the DMAE completion value
 * has been written (otherwise bails out and waits for the next tick).
 * Processes hardware stats (PMF only), then storm stats; if the storms
 * fail to refresh their blocks for 4 consecutive cycles the driver
 * panics.  Finally refreshes netdev/driver counters, optionally dumps a
 * debug snapshot, and posts the requests for the next cycle.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* previous DMAE cycle not finished yet */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* stats_pending counts consecutive storm-update failures */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* verbose periodic dump, gated on NETIF_MSG_TIMER */
	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = bp->fp;
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	/* kick off the next collection cycle */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004348
/* Queue DMAE command(s) copying the final statistics from host memory back
 * to the management-FW areas in shared memory (port stats to port_stx,
 * function stats to func_stx).  Commands are only built here; they are
 * fired later by bnx2x_hw_stats_post().
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* start building the DMAE command list from scratch */
	bp->executer_idx = 0;

	/* common opcode bits: PCI -> GRC copy; the completion-destination
	 * bits are filled in per command below */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* if a function-stats copy follows, chain to it through the
		 * DMAE loader (completion to GRC); otherwise complete to the
		 * stats_comp word in host memory */
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			/* clear the completion word before posting */
			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		/* function stats always complete to the stats_comp word */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
4412
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004413static void bnx2x_stats_stop(struct bnx2x *bp)
4414{
4415 int update = 0;
4416
4417 bnx2x_stats_comp(bp);
4418
4419 if (bp->port.pmf)
4420 update = (bnx2x_hw_stats_update(bp) == 0);
4421
4422 update |= (bnx2x_storm_stats_update(bp) == 0);
4423
4424 if (update) {
4425 bnx2x_net_stats_update(bp);
4426
4427 if (bp->port.pmf)
4428 bnx2x_port_stats_stop(bp);
4429
4430 bnx2x_hw_stats_post(bp);
4431 bnx2x_stats_comp(bp);
4432 }
4433}
4434
/* No-op action used by the statistics state machine for events that are
 * ignored in the current state (see bnx2x_stats_stm below). */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
4438
/* Statistics state machine: for each (current state, event) pair this table
 * gives the action to run and the state to move to.  Indexed by
 * bnx2x_stats_handle(). */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
4457
4458static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4459{
4460 enum bnx2x_stats_state state = bp->stats_state;
4461
4462 bnx2x_stats_stm[state][event].action(bp);
4463 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4464
Eilon Greenstein89246652009-08-12 08:23:56 +00004465 /* Make sure the state has been "changed" */
4466 smp_wmb();
4467
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004468 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4469 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4470 state, event, bp->stats_state);
4471}
4472
/* Write the (zeroed) host port-statistics buffer out to the management-FW
 * port_stx area via a single synchronous DMAE transaction.  PMF only. */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* single PCI -> GRC copy, completing to the stats_comp word */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* post the command and wait for its completion */
	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4510
/* Initialize the function-statistics shared-memory area of every vnic on
 * this port (all E1H vnics, or just the single E1 function).  Temporarily
 * retargets bp->func_stx at each vnic's area so the common init/post path
 * can be reused.  PMF only. */
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		/* functions interleave across the two ports: 2*vn + port */
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}
4539
/* Read back (GRC -> PCI) the function-statistics base from shared memory
 * into the host func_stats_base buffer via a synchronous DMAE transaction.
 * Used by non-PMF functions at stats init time. */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* note: direction is GRC -> PCI here, the reverse of the writers */
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* post the command and wait for its completion */
	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4577
/* One-time statistics initialization: discover the management-FW stats
 * areas, snapshot the NIG baseline counters, zero all per-queue and global
 * software statistics, and prime the shared-memory stats areas (PMF) or
 * read them back (non-PMF). */
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		/* no MCP - no shared-memory stats areas */
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats: take a baseline of the NIG counters so later updates
	 * can report deltas */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats: clear the per-queue storm snapshots and counters */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		/* PMF seeds the shared-memory areas for all functions */
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		/* non-PMF reads its base back from shared memory */
		bnx2x_func_stats_base_update(bp);
}
4639
/* Periodic driver timer: services the rings in poll mode, exchanges the
 * driver<->MCP heartbeat pulse, and kicks a statistics update while the
 * device is open.  Re-arms itself at bp->current_interval. */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	/* interrupts are masked - just re-arm and try again later */
	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		/* poll mode: service queue 0 from the timer context */
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		/* advance and publish our heartbeat sequence number */
		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4688
4689/* end of Statistics */
4690
4691/* nic init */
4692
4693/*
4694 * nic init service functions
4695 */
4696
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004697static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004698{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004699 int port = BP_PORT(bp);
4700
Eilon Greensteinca003922009-08-12 22:53:28 -07004701 /* "CSTORM" */
4702 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4703 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4704 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4705 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4706 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4707 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004708}
4709
/* Program a per-queue status block: write its DMA address and owning
 * function into the CSTORM fast memory for both the U and C parts, disable
 * host coalescing on every index, and ACK/enable the IGU for this sb_id. */
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	/* host address of the U section (low then high dword) */
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	/* disable coalescing on all U indices (1 == disabled) */
	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	/* disable coalescing on all C indices (1 == disabled) */
	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4754
/* Zero this function's default status block areas in the fast memory of
 * each storm (TSTORM, CSTORM U+C, XSTORM). */
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
4772
/* Program the default (slowpath) status block: cache the AEU attention
 * group masks, register the attention section with the HC, then write the
 * DMA address and owning function of each storm's section and disable host
 * coalescing on every index.  Finishes by ACK/enabling the IGU. */
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	/* snapshot the AEU enable masks for all dynamic attention groups;
	 * each group occupies 4 dwords starting 0x10 apart */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	/* tell the HC where the attention section lives */
	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	/* 1 == coalescing disabled on this index */
	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4888
4889static void bnx2x_update_coalesce(struct bnx2x *bp)
4890{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004891 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004892 int i;
4893
4894 for_each_queue(bp, i) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004895 int sb_id = bp->fp[i].sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004896
4897 /* HC_INDEX_U_ETH_RX_CQ_CONS */
Eilon Greensteinca003922009-08-12 22:53:28 -07004898 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4899 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4900 U_SB_ETH_RX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00004901 bp->rx_ticks/(4 * BNX2X_BTR));
Eilon Greensteinca003922009-08-12 22:53:28 -07004902 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4903 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4904 U_SB_ETH_RX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00004905 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004906
4907 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4908 REG_WR8(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07004909 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4910 C_SB_ETH_TX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00004911 bp->tx_ticks/(4 * BNX2X_BTR));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004912 REG_WR16(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07004913 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4914 C_SB_ETH_TX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00004915 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004916 }
4917}
4918
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004919static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4920 struct bnx2x_fastpath *fp, int last)
4921{
4922 int i;
4923
4924 for (i = 0; i < last; i++) {
4925 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4926 struct sk_buff *skb = rx_buf->skb;
4927
4928 if (skb == NULL) {
4929 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4930 continue;
4931 }
4932
4933 if (fp->tpa_state[i] == BNX2X_TPA_START)
4934 pci_unmap_single(bp->pdev,
4935 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein356e2382009-02-12 08:38:32 +00004936 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004937
4938 dev_kfree_skb(skb);
4939 rx_buf->skb = NULL;
4940 }
4941}
4942
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004943static void bnx2x_init_rx_rings(struct bnx2x *bp)
4944{
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004945 int func = BP_FUNC(bp);
Eilon Greenstein32626232008-08-13 15:51:07 -07004946 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4947 ETH_MAX_AGGREGATION_QUEUES_E1H;
4948 u16 ring_prod, cqe_ring_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004949 int i, j;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004950
Eilon Greenstein87942b42009-02-12 08:36:49 +00004951 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
Eilon Greenstein0f008462009-02-12 08:36:18 +00004952 DP(NETIF_MSG_IFUP,
4953 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004954
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004955 if (bp->flags & TPA_ENABLE_FLAG) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004956
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004957 for_each_queue(bp, j) {
Eilon Greenstein32626232008-08-13 15:51:07 -07004958 struct bnx2x_fastpath *fp = &bp->fp[j];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004959
Eilon Greenstein32626232008-08-13 15:51:07 -07004960 for (i = 0; i < max_agg_queues; i++) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004961 fp->tpa_pool[i].skb =
4962 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4963 if (!fp->tpa_pool[i].skb) {
4964 BNX2X_ERR("Failed to allocate TPA "
4965 "skb pool for queue[%d] - "
4966 "disabling TPA on this "
4967 "queue!\n", j);
4968 bnx2x_free_tpa_pool(bp, fp, i);
4969 fp->disable_tpa = 1;
4970 break;
4971 }
4972 pci_unmap_addr_set((struct sw_rx_bd *)
4973 &bp->fp->tpa_pool[i],
4974 mapping, 0);
4975 fp->tpa_state[i] = BNX2X_TPA_STOP;
4976 }
4977 }
4978 }
4979
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004980 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004981 struct bnx2x_fastpath *fp = &bp->fp[j];
4982
4983 fp->rx_bd_cons = 0;
4984 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004985 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004986
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004987 /* "next page" elements initialization */
4988 /* SGE ring */
4989 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4990 struct eth_rx_sge *sge;
4991
4992 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4993 sge->addr_hi =
4994 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4995 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4996 sge->addr_lo =
4997 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4998 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4999 }
5000
5001 bnx2x_init_sge_ring_bit_mask(fp);
5002
5003 /* RX BD ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005004 for (i = 1; i <= NUM_RX_RINGS; i++) {
5005 struct eth_rx_bd *rx_bd;
5006
5007 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5008 rx_bd->addr_hi =
5009 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005010 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005011 rx_bd->addr_lo =
5012 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005013 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005014 }
5015
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005016 /* CQ ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005017 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5018 struct eth_rx_cqe_next_page *nextpg;
5019
5020 nextpg = (struct eth_rx_cqe_next_page *)
5021 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5022 nextpg->addr_hi =
5023 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005024 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005025 nextpg->addr_lo =
5026 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005027 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005028 }
5029
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005030 /* Allocate SGEs and initialize the ring elements */
5031 for (i = 0, ring_prod = 0;
5032 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005033
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005034 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5035 BNX2X_ERR("was only able to allocate "
5036 "%d rx sges\n", i);
5037 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5038 /* Cleanup already allocated elements */
5039 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
Eilon Greenstein32626232008-08-13 15:51:07 -07005040 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005041 fp->disable_tpa = 1;
5042 ring_prod = 0;
5043 break;
5044 }
5045 ring_prod = NEXT_SGE_IDX(ring_prod);
5046 }
5047 fp->rx_sge_prod = ring_prod;
5048
5049 /* Allocate BDs and initialize BD ring */
Yitchak Gertner66e855f2008-08-13 15:49:05 -07005050 fp->rx_comp_cons = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005051 cqe_ring_prod = ring_prod = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005052 for (i = 0; i < bp->rx_ring_size; i++) {
5053 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5054 BNX2X_ERR("was only able to allocate "
Eilon Greensteinde832a52009-02-12 08:36:33 +00005055 "%d rx skbs on queue[%d]\n", i, j);
5056 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005057 break;
5058 }
5059 ring_prod = NEXT_RX_IDX(ring_prod);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005060 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
Ilpo Järvinen53e5e962008-07-25 21:40:45 -07005061 WARN_ON(ring_prod <= i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005062 }
5063
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005064 fp->rx_bd_prod = ring_prod;
5065 /* must not have more available CQEs than BDs */
5066 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5067 cqe_ring_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005068 fp->rx_pkt = fp->rx_calls = 0;
5069
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005070 /* Warning!
5071 * this will generate an interrupt (to the TSTORM)
5072 * must only be done after chip is initialized
5073 */
5074 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5075 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005076 if (j != 0)
5077 continue;
5078
5079 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005080 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005081 U64_LO(fp->rx_comp_mapping));
5082 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005083 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005084 U64_HI(fp->rx_comp_mapping));
5085 }
5086}
5087
5088static void bnx2x_init_tx_ring(struct bnx2x *bp)
5089{
5090 int i, j;
5091
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005092 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005093 struct bnx2x_fastpath *fp = &bp->fp[j];
5094
5095 for (i = 1; i <= NUM_TX_RINGS; i++) {
Eilon Greensteinca003922009-08-12 22:53:28 -07005096 struct eth_tx_next_bd *tx_next_bd =
5097 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005098
Eilon Greensteinca003922009-08-12 22:53:28 -07005099 tx_next_bd->addr_hi =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005100 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005101 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eilon Greensteinca003922009-08-12 22:53:28 -07005102 tx_next_bd->addr_lo =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005103 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005104 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005105 }
5106
Eilon Greensteinca003922009-08-12 22:53:28 -07005107 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5108 fp->tx_db.data.zero_fill1 = 0;
5109 fp->tx_db.data.prod = 0;
5110
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005111 fp->tx_pkt_prod = 0;
5112 fp->tx_pkt_cons = 0;
5113 fp->tx_bd_prod = 0;
5114 fp->tx_bd_cons = 0;
5115 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5116 fp->tx_pkt = 0;
5117 }
5118}
5119
5120static void bnx2x_init_sp_ring(struct bnx2x *bp)
5121{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005122 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005123
5124 spin_lock_init(&bp->spq_lock);
5125
5126 bp->spq_left = MAX_SPQ_PENDING;
5127 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005128 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5129 bp->spq_prod_bd = bp->spq;
5130 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5131
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005132 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005133 U64_LO(bp->spq_mapping));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005134 REG_WR(bp,
5135 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005136 U64_HI(bp->spq_mapping));
5137
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005138 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005139 bp->spq_prod_idx);
5140}
5141
/* Fill in the per-connection Eth context images (kept in the slowpath
 * buffer, see bnx2x_sp()) for every fastpath queue: first the Rx side
 * (USTORM state + U/X aggregation CDU values), then the Tx side
 * (CSTORM/XSTORM state).
 */
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		/* per-client statistics counter shares the client id */
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			/* TPA enabled: also program the SGE ring; buffer
			 * size is clamped to the 16-bit field limit */
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			/* SGEs per MTU-sized packet, rounded up to a whole
			 * number of SGE pages */
			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		/* CDU validation values for the U and X aggregation
		 * contexts of this connection */
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		/* Tx completions are reported via the CQ index of the
		 * queue's status block */
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}
5217
5218static void bnx2x_init_ind_table(struct bnx2x *bp)
5219{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005220 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005221 int i;
5222
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005223 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005224 return;
5225
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005226 DP(NETIF_MSG_IFUP,
5227 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005228 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005229 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005230 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005231 bp->fp->cl_id + (i % bp->num_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005232}
5233
/* Write the TSTORM per-client configuration (MTU, statistics and VLAN
 * stripping flags) for every fastpath client of this port.
 * The 8-byte struct is written as two 32-bit register writes.
 */
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	/* Offload VLAN tag removal only when a VLAN group is registered
	 * and HW VLAN Rx acceleration is enabled */
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		/* each client counts statistics into its own counter */
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
5266
/* Translate bp->rx_mode into the TSTORM MAC filter configuration and
 * the NIG LLH BRB mask for this port, then (unless Rx is disabled)
 * refresh the per-client config.  The drop/accept fields are per-client
 * bitmasks; 'mask' selects the clients affected.
 */
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		/* drop everything for the selected clients */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		/* unicast/multicast filtered by MAC; accept broadcast */
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	/* write the filter config to TSTORM internal memory, one
	 * 32-bit word at a time */
	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
5329
Eilon Greenstein471de712008-08-13 15:49:35 -07005330static void bnx2x_init_internal_common(struct bnx2x *bp)
5331{
5332 int i;
5333
5334 /* Zero this manually as its initialization is
5335 currently missing in the initTool */
5336 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5337 REG_WR(bp, BAR_USTRORM_INTMEM +
5338 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5339}
5340
/* Per-port internal memory init: write the BNX2X_BTR value into the
 * HC_BTR slot of each storm (C/U via CSTORM, plus TSTORM and XSTORM)
 * for this port.
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
5352
/* Per-function internal memory init: RSS/TPA function config, initial
 * Rx mode, per-client statistics reset, statistics collection setup,
 * E1H multi-function mode flags, CQ ring mapping and TPA aggregation
 * limits, dropless flow control thresholds (E1H), and the rate
 * shaping/fairness (cmng) contexts.
 */
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	/* Zero the per-client statistics areas of the X/T/U storms so
	 * counters start from a clean state */
	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	/* Tell each storm where the FW statistics buffer lives
	 * (64-bit DMA address written as low word then high word) */
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		/* publish single/multi function mode to all storms */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			/* SGE thresholds are only meaningful when TPA
			 * (and thus the SGE ring) is in use */
			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}


			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}


	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
5567
/* Run the internal memory init stages implied by the MCP load code.
 * The cases deliberately cascade: a COMMON load also performs the PORT
 * and FUNCTION stages, and a PORT load also performs FUNCTION.
 */
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* fallthrough: COMMON implies PORT init too */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* fallthrough: PORT implies FUNCTION init too */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
5588
/* Top-level NIC init: set up the fastpath structures and status blocks,
 * all rings and contexts, internal memory (per load_code), and finally
 * enable interrupts.  Ordering here matters: interrupts are enabled
 * only after everything else is in place.
 */
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		/* with CNIC, status block 0 is reserved; shift by one */
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
5643
5644/* end of nic init */
5645
5646/*
5647 * gzip service functions
5648 */
5649
/* Allocate the resources needed by bnx2x_gunzip(): a DMA-coherent
 * output buffer, a zlib stream object and its inflate workspace.
 *
 * Returns 0 on success or -ENOMEM on any allocation failure; the error
 * labels fall through so each one also releases everything allocated
 * before it and clears the corresponding pointers.
 */
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}
5681}
5682
5683static void bnx2x_gunzip_end(struct bnx2x *bp)
5684{
5685 kfree(bp->strm->workspace);
5686
5687 kfree(bp->strm);
5688 bp->strm = NULL;
5689
5690 if (bp->gunzip_buf) {
5691 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5692 bp->gunzip_mapping);
5693 bp->gunzip_buf = NULL;
5694 }
5695}
5696
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005697static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005698{
5699 int n, rc;
5700
5701 /* check gzip header */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005702 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5703 BNX2X_ERR("Bad gzip header\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005704 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005705 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005706
5707 n = 10;
5708
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005709#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005710
5711 if (zbuf[3] & FNAME)
5712 while ((zbuf[n++] != 0) && (n < len));
5713
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005714 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005715 bp->strm->avail_in = len - n;
5716 bp->strm->next_out = bp->gunzip_buf;
5717 bp->strm->avail_out = FW_BUF_SIZE;
5718
5719 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5720 if (rc != Z_OK)
5721 return rc;
5722
5723 rc = zlib_inflate(bp->strm, Z_FINISH);
5724 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5725 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5726 bp->dev->name, bp->strm->msg);
5727
5728 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5729 if (bp->gunzip_outlen & 0x3)
5730 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5731 " gunzip_outlen (%d) not aligned\n",
5732 bp->dev->name, bp->gunzip_outlen);
5733 bp->gunzip_outlen >>= 2;
5734
5735 zlib_inflateEnd(bp->strm);
5736
5737 if (rc == Z_STREAM_END)
5738 return 0;
5739
5740 return rc;
5741}
5742
5743/* nic load/unload */
5744
5745/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005746 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005747 */
5748
/* send a NIG loopback debug packet
 *
 * Writes a minimal hand-crafted frame into the NIG debug-packet
 * loopback register via DMAE, in two 3-word bursts: the first carries
 * the (dummy) Ethernet addresses with the SOP marker, the second a
 * non-IP ethertype payload word with the EOP marker.  Used by
 * bnx2x_int_mem_test() to push traffic through internal memories that
 * are not directly readable.
 */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
5766
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 *
 * Two-part self test: part 1 sends a single loopback frame with the
 * parser's CFC search credits zeroed and checks that NIG and PRS
 * counters see exactly one packet; part 2 repeats with 10 frames and
 * then restores the credit to let the queued work drain.  Returns 0 on
 * success or a negative step number (-1..-4) identifying the first
 * check that failed.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* emulation and FPGA platforms run much slower than silicon -
	   scale every polling timeout accordingly */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		/* octet counter is read via DMAE into the slowpath buffer */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
5918
/* Configure the per-block attention interrupt masks.
 * NOTE(review): writing 0 to a *_INT_MASK register appears to unmask
 * all of that block's attention sources - confirm polarity against the
 * register spec.  The commented-out SEM/MISC writes look intentionally
 * left masked; do not "fix" them without hardware documentation.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	/* PXP2 keeps some attention sources masked; the exact mask
	   differs on FPGA platforms */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
5957
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005958
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00005959static void bnx2x_reset_common(struct bnx2x *bp)
5960{
5961 /* reset_common */
5962 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5963 0xd3ffff7f);
5964 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5965}
5966
Eilon Greenstein573f2032009-08-12 08:24:14 +00005967static void bnx2x_init_pxp(struct bnx2x *bp)
5968{
5969 u16 devctl;
5970 int r_order, w_order;
5971
5972 pci_read_config_word(bp->pdev,
5973 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5974 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5975 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5976 if (bp->mrrs == -1)
5977 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5978 else {
5979 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5980 r_order = bp->mrrs;
5981 }
5982
5983 bnx2x_init_pxp_arb(bp, r_order, w_order);
5984}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00005985
/* Enable fan-failure detection via SPIO 5 when the shared-memory HW
 * configuration requests it - either unconditionally, or based on the
 * external PHY type of either port.  No-op when not required.
 */
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			/* required if ANY port carries one of the
			   fan-sensitive PHYs */
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
6038
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006039static int bnx2x_init_common(struct bnx2x *bp)
6040{
6041 u32 val, i;
Michael Chan37b091b2009-10-10 13:46:55 +00006042#ifdef BCM_CNIC
6043 u32 wb_write[2];
6044#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006045
6046 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6047
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00006048 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006049 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6050 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6051
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006052 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006053 if (CHIP_IS_E1H(bp))
6054 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6055
6056 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6057 msleep(30);
6058 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6059
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006060 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006061 if (CHIP_IS_E1(bp)) {
6062 /* enable HW interrupt from PXP on USDM overflow
6063 bit 16 on INT_MASK_0 */
6064 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006065 }
6066
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006067 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006068 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006069
6070#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006071 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6072 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6073 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6074 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6075 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00006076 /* make sure this value is 0 */
6077 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006078
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006079/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6080 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6081 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6082 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6083 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006084#endif
6085
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006086 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
Michael Chan37b091b2009-10-10 13:46:55 +00006087#ifdef BCM_CNIC
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006088 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6089 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6090 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006091#endif
6092
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006093 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6094 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006095
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006096 /* let the HW do it's magic ... */
6097 msleep(100);
6098 /* finish PXP init */
6099 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6100 if (val != 1) {
6101 BNX2X_ERR("PXP2 CFG failed\n");
6102 return -EBUSY;
6103 }
6104 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6105 if (val != 1) {
6106 BNX2X_ERR("PXP2 RD_INIT failed\n");
6107 return -EBUSY;
6108 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006109
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006110 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6111 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006112
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006113 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006114
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006115 /* clean the DMAE memory */
6116 bp->dmae_ready = 1;
6117 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006118
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006119 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6120 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6121 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6122 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006123
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006124 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6125 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6126 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6127 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6128
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006129 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00006130
6131#ifdef BCM_CNIC
6132 wb_write[0] = 0;
6133 wb_write[1] = 0;
6134 for (i = 0; i < 64; i++) {
6135 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6136 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6137
6138 if (CHIP_IS_E1H(bp)) {
6139 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6140 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6141 wb_write, 2);
6142 }
6143 }
6144#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006145 /* soft reset pulse */
6146 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6147 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006148
Michael Chan37b091b2009-10-10 13:46:55 +00006149#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006150 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006151#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006152
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006153 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006154 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6155 if (!CHIP_REV_IS_SLOW(bp)) {
6156 /* enable hw interrupt from doorbell Q */
6157 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6158 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006159
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006160 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6161 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08006162 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00006163#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07006164 /* set NIC mode */
6165 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00006166#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006167 if (CHIP_IS_E1H(bp))
6168 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006169
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006170 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6171 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6172 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6173 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006174
Eilon Greensteinca003922009-08-12 22:53:28 -07006175 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6176 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6177 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6178 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006179
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006180 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6181 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6182 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6183 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006184
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006185 /* sync semi rtc */
6186 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6187 0x80000000);
6188 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6189 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006190
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006191 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6192 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6193 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006194
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006195 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6196 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6197 REG_WR(bp, i, 0xc0cac01a);
6198 /* TODO: replace with something meaningful */
6199 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006200 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00006201#ifdef BCM_CNIC
6202 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6203 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6204 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6205 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6206 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6207 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6208 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6209 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6210 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6211 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6212#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006213 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006214
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006215 if (sizeof(union cdu_context) != 1024)
6216 /* we currently assume that a context is 1024 bytes */
6217 printk(KERN_ALERT PFX "please adjust the size of"
6218 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006219
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006220 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006221 val = (4 << 24) + (0 << 12) + 1024;
6222 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006223
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006224 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006225 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08006226 /* enable context validation interrupt from CFC */
6227 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6228
6229 /* set the thresholds to prevent CFC/CDU race */
6230 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006231
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006232 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6233 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006234
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006235 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006236 /* Reset PCIE errors for debug */
6237 REG_WR(bp, 0x2814, 0xffffffff);
6238 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006239
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006240 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006241 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006242 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006243 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006244
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006245 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006246 if (CHIP_IS_E1H(bp)) {
6247 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6248 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6249 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006250
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006251 if (CHIP_REV_IS_SLOW(bp))
6252 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006253
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006254 /* finish CFC init */
6255 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6256 if (val != 1) {
6257 BNX2X_ERR("CFC LL_INIT failed\n");
6258 return -EBUSY;
6259 }
6260 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6261 if (val != 1) {
6262 BNX2X_ERR("CFC AC_INIT failed\n");
6263 return -EBUSY;
6264 }
6265 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6266 if (val != 1) {
6267 BNX2X_ERR("CFC CAM_INIT failed\n");
6268 return -EBUSY;
6269 }
6270 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006271
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006272 /* read NIG statistic
6273 to see if this is our first up since powerup */
6274 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6275 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006276
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006277 /* do internal memory self test */
6278 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6279 BNX2X_ERR("internal mem self test failed\n");
6280 return -EBUSY;
6281 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006282
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006283 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
Eilon Greenstein46c6a672009-02-12 08:36:58 +00006284 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6285 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6286 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006287 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eilon Greenstein46c6a672009-02-12 08:36:58 +00006288 bp->port.need_hw_lock = 1;
6289 break;
6290
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006291 default:
6292 break;
6293 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08006294
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00006295 bnx2x_setup_fan_failure_detection(bp);
6296
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006297 /* clear PXP2 attentions */
6298 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006299
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006300 enable_blocks_attention(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006301
Yaniv Rosner6bbca912008-08-13 15:57:28 -07006302 if (!BP_NOMCP(bp)) {
6303 bnx2x_acquire_phy_lock(bp);
6304 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6305 bnx2x_release_phy_lock(bp);
6306 } else
6307 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6308
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006309 return 0;
6310}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006311
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006312static int bnx2x_init_port(struct bnx2x *bp)
6313{
6314 int port = BP_PORT(bp);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006315 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
Eilon Greenstein1c063282009-02-12 08:36:43 +00006316 u32 low, high;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006317 u32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006318
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006319 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6320
6321 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006322
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006323 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006324 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07006325
6326 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6327 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6328 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006329 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006330
Michael Chan37b091b2009-10-10 13:46:55 +00006331#ifdef BCM_CNIC
6332 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006333
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006334 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
Michael Chan37b091b2009-10-10 13:46:55 +00006335 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6336 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006337#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006338 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00006339
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006340 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00006341 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6342 /* no pause for emulation and FPGA */
6343 low = 0;
6344 high = 513;
6345 } else {
6346 if (IS_E1HMF(bp))
6347 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6348 else if (bp->dev->mtu > 4096) {
6349 if (bp->flags & ONE_PORT_FLAG)
6350 low = 160;
6351 else {
6352 val = bp->dev->mtu;
6353 /* (24*1024 + val*4)/256 */
6354 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6355 }
6356 } else
6357 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6358 high = low + 56; /* 14*1024/256 */
6359 }
6360 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6361 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6362
6363
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006364 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07006365
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006366 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006367 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006368 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006369 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006370
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006371 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6372 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6373 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6374 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006375
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006376 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006377 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006378
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006379 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006380
6381 /* configure PBF to work without PAUSE mtu 9000 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006382 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006383
6384 /* update threshold */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006385 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006386 /* update init credit */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006387 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006388
6389 /* probe changes */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006390 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006391 msleep(5);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006392 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006393
Michael Chan37b091b2009-10-10 13:46:55 +00006394#ifdef BCM_CNIC
6395 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006396#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006397 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006398 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006399
6400 if (CHIP_IS_E1(bp)) {
6401 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6402 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6403 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006404 bnx2x_init_block(bp, HC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006405
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006406 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006407 /* init aeu_mask_attn_func_0/1:
6408 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6409 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6410 * bits 4-7 are used for "per vn group attention" */
6411 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6412 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6413
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006414 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006415 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006416 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006417 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006418 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006419
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006420 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006421
6422 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6423
6424 if (CHIP_IS_E1H(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006425 /* 0x2 disable e1hov, 0x1 enable */
6426 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6427 (IS_E1HMF(bp) ? 0x1 : 0x2));
6428
Eilon Greenstein1c063282009-02-12 08:36:43 +00006429 {
6430 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6431 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6432 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6433 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006434 }
6435
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006436 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006437 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006438
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006439 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
Eilon Greenstein589abe32009-02-12 08:36:55 +00006440 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6441 {
6442 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6443
6444 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6445 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6446
6447 /* The GPIO should be swapped if the swap register is
6448 set and active */
6449 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6450 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6451
6452 /* Select function upon port-swap configuration */
6453 if (port == 0) {
6454 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6455 aeu_gpio_mask = (swap_val && swap_override) ?
6456 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6457 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6458 } else {
6459 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6460 aeu_gpio_mask = (swap_val && swap_override) ?
6461 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6462 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6463 }
6464 val = REG_RD(bp, offset);
6465 /* add GPIO3 to group */
6466 val |= aeu_gpio_mask;
6467 REG_WR(bp, offset, val);
6468 }
6469 break;
6470
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006471 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006472 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eliezer Tamirf1410642008-02-28 11:51:50 -08006473 /* add SPIO 5 to group 0 */
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006474 {
6475 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6476 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6477 val = REG_RD(bp, reg_addr);
Eliezer Tamirf1410642008-02-28 11:51:50 -08006478 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006479 REG_WR(bp, reg_addr, val);
6480 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08006481 break;
6482
6483 default:
6484 break;
6485 }
6486
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006487 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006488
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006489 return 0;
6490}
6491
/* ILT window: 768 lines split between the two functions.
 * All function-like macros below fully parenthesize their arguments so
 * they expand safely even for non-trivial argument expressions.
 */
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	((func) * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)(x) >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | (x))
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | (f))

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006510
6511static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6512{
6513 int reg;
6514
6515 if (CHIP_IS_E1H(bp))
6516 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6517 else /* E1 */
6518 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6519
6520 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6521}
6522
/* Per-function hardware init, the last of the three init stages
 * (common -> port -> function).  Programs this function's ILT window,
 * optional CNIC resources (timers/QM/searcher tables), the per-function
 * init blocks and the HC/PCIe-error registers.  Always returns 0.
 */
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	/* first ILT line owned by this function */
	i = FUNC_ILT_BASE(func);

	/* map the slowpath context area; the CDU range additionally
	 * covers the (possibly zero) CNIC context lines */
	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	/* timers block ILT line */
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	/* QM queues ILT line */
	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	/* searcher T1 table ILT line */
	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	/* last free entry is 64 bytes before the end of the 16K T2 area */
	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		/* per-function init blocks */
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
6619
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006620static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6621{
6622 int i, rc = 0;
6623
6624 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6625 BP_FUNC(bp), load_code);
6626
6627 bp->dmae_ready = 0;
6628 mutex_init(&bp->dmae_mutex);
Eilon Greenstein54016b22009-08-12 08:23:48 +00006629 rc = bnx2x_gunzip_init(bp);
6630 if (rc)
6631 return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006632
6633 switch (load_code) {
6634 case FW_MSG_CODE_DRV_LOAD_COMMON:
6635 rc = bnx2x_init_common(bp);
6636 if (rc)
6637 goto init_hw_err;
6638 /* no break */
6639
6640 case FW_MSG_CODE_DRV_LOAD_PORT:
6641 bp->dmae_ready = 1;
6642 rc = bnx2x_init_port(bp);
6643 if (rc)
6644 goto init_hw_err;
6645 /* no break */
6646
6647 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6648 bp->dmae_ready = 1;
6649 rc = bnx2x_init_func(bp);
6650 if (rc)
6651 goto init_hw_err;
6652 break;
6653
6654 default:
6655 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6656 break;
6657 }
6658
6659 if (!BP_NOMCP(bp)) {
6660 int func = BP_FUNC(bp);
6661
6662 bp->fw_drv_pulse_wr_seq =
6663 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6664 DRV_PULSE_SEQ_MASK);
Eilon Greenstein6fe49bb2009-08-12 08:23:17 +00006665 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6666 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006667
6668 /* this needs to be done before gunzip end */
6669 bnx2x_zero_def_sb(bp);
6670 for_each_queue(bp, i)
6671 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
Michael Chan37b091b2009-10-10 13:46:55 +00006672#ifdef BCM_CNIC
6673 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6674#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006675
6676init_hw_err:
6677 bnx2x_gunzip_end(bp);
6678
6679 return rc;
6680}
6681
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006682static void bnx2x_free_mem(struct bnx2x *bp)
6683{
6684
6685#define BNX2X_PCI_FREE(x, y, size) \
6686 do { \
6687 if (x) { \
6688 pci_free_consistent(bp->pdev, size, x, y); \
6689 x = NULL; \
6690 y = 0; \
6691 } \
6692 } while (0)
6693
6694#define BNX2X_FREE(x) \
6695 do { \
6696 if (x) { \
6697 vfree(x); \
6698 x = NULL; \
6699 } \
6700 } while (0)
6701
6702 int i;
6703
6704 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006705 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006706 for_each_queue(bp, i) {
6707
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006708 /* status blocks */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006709 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6710 bnx2x_fp(bp, i, status_blk_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006711 sizeof(struct host_status_block));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006712 }
6713 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006714 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006715
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006716 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006717 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6718 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6719 bnx2x_fp(bp, i, rx_desc_mapping),
6720 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6721
6722 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6723 bnx2x_fp(bp, i, rx_comp_mapping),
6724 sizeof(struct eth_fast_path_rx_cqe) *
6725 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006726
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006727 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07006728 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006729 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6730 bnx2x_fp(bp, i, rx_sge_mapping),
6731 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6732 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006733 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006734 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006735
6736 /* fastpath tx rings: tx_buf tx_desc */
6737 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6738 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6739 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006740 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006741 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006742 /* end of fastpath */
6743
6744 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006745 sizeof(struct host_def_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006746
6747 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006748 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006749
Michael Chan37b091b2009-10-10 13:46:55 +00006750#ifdef BCM_CNIC
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006751 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6752 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6753 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6754 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
Michael Chan37b091b2009-10-10 13:46:55 +00006755 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6756 sizeof(struct host_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006757#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006758 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006759
6760#undef BNX2X_PCI_FREE
6761#undef BNX2X_KFREE
6762}
6763
/* Allocate all driver memory: per-queue status blocks and Rx/Tx rings,
 * the default status block, the slowpath area, optional CNIC tables and
 * the slowpath (SP) ring.  On any failure the partial state is released
 * via bnx2x_free_mem() and -ENOMEM is returned.
 */
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

/* zeroed DMA-coherent allocation; bails to alloc_mem_err on failure */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* zeroed vmalloc allocation; bails to alloc_mem_err on failure */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	  (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	/* chain each 64-byte entry to the DMA address of the next one */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6869
6870static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6871{
6872 int i;
6873
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006874 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006875 struct bnx2x_fastpath *fp = &bp->fp[i];
6876
6877 u16 bd_cons = fp->tx_bd_cons;
6878 u16 sw_prod = fp->tx_pkt_prod;
6879 u16 sw_cons = fp->tx_pkt_cons;
6880
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006881 while (sw_cons != sw_prod) {
6882 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6883 sw_cons++;
6884 }
6885 }
6886}
6887
6888static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6889{
6890 int i, j;
6891
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006892 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006893 struct bnx2x_fastpath *fp = &bp->fp[j];
6894
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006895 for (i = 0; i < NUM_RX_BD; i++) {
6896 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6897 struct sk_buff *skb = rx_buf->skb;
6898
6899 if (skb == NULL)
6900 continue;
6901
6902 pci_unmap_single(bp->pdev,
6903 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein356e2382009-02-12 08:38:32 +00006904 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006905
6906 rx_buf->skb = NULL;
6907 dev_kfree_skb(skb);
6908 }
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006909 if (!fp->disable_tpa)
Eilon Greenstein32626232008-08-13 15:51:07 -07006910 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6911 ETH_MAX_AGGREGATION_QUEUES_E1 :
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006912 ETH_MAX_AGGREGATION_QUEUES_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006913 }
6914}
6915
/* Free every skb still owned by the driver: queued Tx packets first,
 * then the posted Rx buffers (and their TPA pool). */
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
6921
6922static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6923{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006924 int i, offset = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006925
6926 free_irq(bp->msix_table[0].vector, bp->dev);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006927 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006928 bp->msix_table[0].vector);
6929
Michael Chan37b091b2009-10-10 13:46:55 +00006930#ifdef BCM_CNIC
6931 offset++;
6932#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006933 for_each_queue(bp, i) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006934 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006935 "state %x\n", i, bp->msix_table[i + offset].vector,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006936 bnx2x_fp(bp, i, state));
6937
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006938 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006939 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006940}
6941
6942static void bnx2x_free_irq(struct bnx2x *bp)
6943{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006944 if (bp->flags & USING_MSIX_FLAG) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006945 bnx2x_free_msix_irqs(bp);
6946 pci_disable_msix(bp->pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006947 bp->flags &= ~USING_MSIX_FLAG;
6948
Eilon Greenstein8badd272009-02-12 08:36:15 +00006949 } else if (bp->flags & USING_MSI_FLAG) {
6950 free_irq(bp->pdev->irq, bp->dev);
6951 pci_disable_msi(bp->pdev);
6952 bp->flags &= ~USING_MSI_FLAG;
6953
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006954 } else
6955 free_irq(bp->pdev->irq, bp->dev);
6956}
6957
6958static int bnx2x_enable_msix(struct bnx2x *bp)
6959{
Eilon Greenstein8badd272009-02-12 08:36:15 +00006960 int i, rc, offset = 1;
6961 int igu_vec = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006962
Eilon Greenstein8badd272009-02-12 08:36:15 +00006963 bp->msix_table[0].entry = igu_vec;
6964 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006965
Michael Chan37b091b2009-10-10 13:46:55 +00006966#ifdef BCM_CNIC
6967 igu_vec = BP_L_ID(bp) + offset;
6968 bp->msix_table[1].entry = igu_vec;
6969 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6970 offset++;
6971#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006972 for_each_queue(bp, i) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00006973 igu_vec = BP_L_ID(bp) + offset + i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006974 bp->msix_table[i + offset].entry = igu_vec;
6975 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6976 "(fastpath #%u)\n", i + offset, igu_vec, i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006977 }
6978
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006979 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006980 BNX2X_NUM_QUEUES(bp) + offset);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006981 if (rc) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00006982 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6983 return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006984 }
Eilon Greenstein8badd272009-02-12 08:36:15 +00006985
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006986 bp->flags |= USING_MSIX_FLAG;
6987
6988 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006989}
6990
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006991static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6992{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006993 int i, rc, offset = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006994
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006995 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6996 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006997 if (rc) {
6998 BNX2X_ERR("request sp irq failed\n");
6999 return -EBUSY;
7000 }
7001
Michael Chan37b091b2009-10-10 13:46:55 +00007002#ifdef BCM_CNIC
7003 offset++;
7004#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007005 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007006 struct bnx2x_fastpath *fp = &bp->fp[i];
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007007 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7008 bp->dev->name, i);
Eilon Greensteinca003922009-08-12 22:53:28 -07007009
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007010 rc = request_irq(bp->msix_table[i + offset].vector,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007011 bnx2x_msix_fp_int, 0, fp->name, fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007012 if (rc) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007013 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007014 bnx2x_free_msix_irqs(bp);
7015 return -EBUSY;
7016 }
7017
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007018 fp->state = BNX2X_FP_STATE_IRQ;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007019 }
7020
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007021 i = BNX2X_NUM_QUEUES(bp);
Eilon Greensteinca003922009-08-12 22:53:28 -07007022 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7023 " ... fp[%d] %d\n",
7024 bp->dev->name, bp->msix_table[0].vector,
7025 0, bp->msix_table[offset].vector,
7026 i - 1, bp->msix_table[offset + i - 1].vector);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007027
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007028 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007029}
7030
Eilon Greenstein8badd272009-02-12 08:36:15 +00007031static int bnx2x_enable_msi(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007032{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007033 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007034
Eilon Greenstein8badd272009-02-12 08:36:15 +00007035 rc = pci_enable_msi(bp->pdev);
7036 if (rc) {
7037 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7038 return -1;
7039 }
7040 bp->flags |= USING_MSI_FLAG;
7041
7042 return 0;
7043}
7044
7045static int bnx2x_req_irq(struct bnx2x *bp)
7046{
7047 unsigned long flags;
7048 int rc;
7049
7050 if (bp->flags & USING_MSI_FLAG)
7051 flags = 0;
7052 else
7053 flags = IRQF_SHARED;
7054
7055 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007056 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007057 if (!rc)
7058 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7059
7060 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007061}
7062
Yitchak Gertner65abd742008-08-25 15:26:24 -07007063static void bnx2x_napi_enable(struct bnx2x *bp)
7064{
7065 int i;
7066
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007067 for_each_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007068 napi_enable(&bnx2x_fp(bp, i, napi));
7069}
7070
7071static void bnx2x_napi_disable(struct bnx2x *bp)
7072{
7073 int i;
7074
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007075 for_each_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007076 napi_disable(&bnx2x_fp(bp, i, napi));
7077}
7078
/* Re-arm the data path after bnx2x_netif_stop().  Decrements the
 * interrupt semaphore; only the caller that brings it to zero
 * (atomic_dec_and_test() returns true) re-enables NAPI, HW interrupts
 * and — once the device is fully OPEN — the Tx queues.
 */
static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}
7095
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007096static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007097{
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007098 bnx2x_int_disable_sync(bp, disable_hw);
Eilon Greensteine94d8af2009-01-22 03:37:36 +00007099 bnx2x_napi_disable(bp);
Eilon Greenstein762d5f62009-03-02 07:59:56 +00007100 netif_tx_disable(bp->dev);
7101 bp->dev->trans_start = jiffies; /* prevent tx timeout */
Yitchak Gertner65abd742008-08-25 15:26:24 -07007102}
7103
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007104/*
7105 * Init service functions
7106 */
7107
Michael Chane665bfd2009-10-10 13:46:54 +00007108/**
7109 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7110 *
7111 * @param bp driver descriptor
7112 * @param set set or clear an entry (1 or 0)
7113 * @param mac pointer to a buffer containing a MAC
7114 * @param cl_bit_vec bit vector of clients to register a MAC for
7115 * @param cam_offset offset in a CAM to use
7116 * @param with_bcast set broadcast MAC as well
7117 */
7118static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7119 u32 cl_bit_vec, u8 cam_offset,
7120 u8 with_bcast)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007121{
7122 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007123 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007124
7125 /* CAM allocation
7126 * unicasts 0-31:port0 32-63:port1
7127 * multicast 64-127:port0 128-191:port1
7128 */
Michael Chane665bfd2009-10-10 13:46:54 +00007129 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7130 config->hdr.offset = cam_offset;
7131 config->hdr.client_id = 0xff;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007132 config->hdr.reserved1 = 0;
7133
7134 /* primary MAC */
7135 config->config_table[0].cam_entry.msb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00007136 swab16(*(u16 *)&mac[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007137 config->config_table[0].cam_entry.middle_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00007138 swab16(*(u16 *)&mac[2]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007139 config->config_table[0].cam_entry.lsb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00007140 swab16(*(u16 *)&mac[4]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007141 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007142 if (set)
7143 config->config_table[0].target_table_entry.flags = 0;
7144 else
7145 CAM_INVALIDATE(config->config_table[0]);
Eilon Greensteinca003922009-08-12 22:53:28 -07007146 config->config_table[0].target_table_entry.clients_bit_vector =
Michael Chane665bfd2009-10-10 13:46:54 +00007147 cpu_to_le32(cl_bit_vec);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007148 config->config_table[0].target_table_entry.vlan_id = 0;
7149
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007150 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7151 (set ? "setting" : "clearing"),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007152 config->config_table[0].cam_entry.msb_mac_addr,
7153 config->config_table[0].cam_entry.middle_mac_addr,
7154 config->config_table[0].cam_entry.lsb_mac_addr);
7155
7156 /* broadcast */
Michael Chane665bfd2009-10-10 13:46:54 +00007157 if (with_bcast) {
7158 config->config_table[1].cam_entry.msb_mac_addr =
7159 cpu_to_le16(0xffff);
7160 config->config_table[1].cam_entry.middle_mac_addr =
7161 cpu_to_le16(0xffff);
7162 config->config_table[1].cam_entry.lsb_mac_addr =
7163 cpu_to_le16(0xffff);
7164 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7165 if (set)
7166 config->config_table[1].target_table_entry.flags =
7167 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7168 else
7169 CAM_INVALIDATE(config->config_table[1]);
7170 config->config_table[1].target_table_entry.clients_bit_vector =
7171 cpu_to_le32(cl_bit_vec);
7172 config->config_table[1].target_table_entry.vlan_id = 0;
7173 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007174
7175 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7176 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7177 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7178}
7179
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	/* E1H reuses the same slowpath buffer but with the E1H layout */
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC: CAM stores each 16-bit word byte-swapped */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		/* clearing is expressed via the E1H "action type" flag */
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	/* fire the SET_MAC ramrod; the caller waits for the completion */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
7227
/**
 * Wait until *state_p reaches 'state' (written by the ramrod completion
 * path, see the mb() note below), or give up after ~5000 iterations.
 *
 * @param bp driver descriptor
 * @param state value of *state_p that signals completion
 * @param idx fastpath index the completion may arrive on (0 = default)
 * @param state_p state word to watch
 * @param poll non-zero: interrupts unusable, drive the rx path by hand
 *
 * @return 0 on success, -EIO if the driver panicked, -EBUSY on timeout
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		/* bail out early if the chip was declared dead meanwhile */
		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
7272
Michael Chane665bfd2009-10-10 13:46:54 +00007273static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7274{
7275 bp->set_mac_pending++;
7276 smp_wmb();
7277
7278 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7279 (1 << bp->fp->cl_id), BP_FUNC(bp));
7280
7281 /* Wait for a completion */
7282 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7283}
7284
7285static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7286{
7287 bp->set_mac_pending++;
7288 smp_wmb();
7289
7290 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7291 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7292 1);
7293
7294 /* Wait for a completion */
7295 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7296}
7297
#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	/* flag the ramrod as outstanding before posting it */
	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		/* +2: skip past the unicast and broadcast ETH entries */
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
					  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif
7335
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007336static int bnx2x_setup_leading(struct bnx2x *bp)
7337{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007338 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007339
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007340 /* reset IGU state */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007341 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007342
7343 /* SETUP ramrod */
7344 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7345
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007346 /* Wait for completion */
7347 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007348
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007349 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007350}
7351
7352static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7353{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007354 struct bnx2x_fastpath *fp = &bp->fp[index];
7355
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007356 /* reset IGU state */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007357 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007358
Eliezer Tamir228241e2008-02-28 11:56:57 -08007359 /* SETUP ramrod */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007360 fp->state = BNX2X_FP_STATE_OPENING;
7361 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7362 fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007363
7364 /* Wait for completion */
7365 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007366 &(fp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007367}
7368
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007369static int bnx2x_poll(struct napi_struct *napi, int budget);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007370
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007371static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007372{
Eilon Greensteinca003922009-08-12 22:53:28 -07007373
7374 switch (bp->multi_mode) {
7375 case ETH_RSS_MODE_DISABLED:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007376 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007377 break;
7378
7379 case ETH_RSS_MODE_REGULAR:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007380 if (num_queues)
7381 bp->num_queues = min_t(u32, num_queues,
7382 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07007383 else
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007384 bp->num_queues = min_t(u32, num_online_cpus(),
7385 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07007386 break;
7387
7388
7389 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007390 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007391 break;
7392 }
Eilon Greensteinca003922009-08-12 22:53:28 -07007393}
7394
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007395static int bnx2x_set_num_queues(struct bnx2x *bp)
Eilon Greensteinca003922009-08-12 22:53:28 -07007396{
7397 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007398
Eilon Greenstein8badd272009-02-12 08:36:15 +00007399 switch (int_mode) {
7400 case INT_MODE_INTx:
7401 case INT_MODE_MSI:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007402 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007403 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
Eilon Greenstein8badd272009-02-12 08:36:15 +00007404 break;
7405
7406 case INT_MODE_MSIX:
7407 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007408 /* Set number of queues according to bp->multi_mode value */
7409 bnx2x_set_num_queues_msix(bp);
Eilon Greensteinca003922009-08-12 22:53:28 -07007410
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007411 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7412 bp->num_queues);
Eilon Greensteinca003922009-08-12 22:53:28 -07007413
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007414 /* if we can't use MSI-X we only need one fp,
7415 * so try to enable MSI-X with the requested number of fp's
7416 * and fallback to MSI or legacy INTx with one fp
7417 */
Eilon Greensteinca003922009-08-12 22:53:28 -07007418 rc = bnx2x_enable_msix(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007419 if (rc)
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007420 /* failed to enable MSI-X */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007421 bp->num_queues = 1;
Eilon Greenstein8badd272009-02-12 08:36:15 +00007422 break;
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007423 }
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007424 bp->dev->real_num_tx_queues = bp->num_queues;
Eilon Greensteinca003922009-08-12 22:53:28 -07007425 return rc;
Eilon Greenstein8badd272009-02-12 08:36:15 +00007426}
7427
Michael Chan993ac7b2009-10-10 13:46:56 +00007428#ifdef BCM_CNIC
7429static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7430static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7431#endif
Eilon Greenstein8badd272009-02-12 08:36:15 +00007432
/* must be called with rtnl_lock */
/*
 * bnx2x_nic_load - bring the NIC up.
 *
 * Sequence: choose the queue/interrupt layout, allocate driver memory,
 * enable NAPI and request IRQs, negotiate the LOAD scope with the MCP
 * (or emulate it via load_count[] when running without an MCP), init the
 * HW and FW internals, post the setup ramrods, program the MAC(s), and
 * finally start the fast path according to 'load_mode'
 * (LOAD_NORMAL / LOAD_OPEN / LOAD_DIAG).
 *
 * Returns 0 on success or a negative errno; on failure, resources are
 * released in reverse order through the load_error* labels.
 */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* rc is deliberately not checked here: an MSI-X allocation failure
	 * (-ENOMEM) is consulted below when choosing the INTx/MSI fallback */
	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	/* TPA is enabled per queue unless globally disabled via bp->flags */
	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_num_queues()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		/* no MCP: emulate the load-scope decision with the shared
		 * load_count[] array (total, port0, port1) */
		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	/* first driver on the port/chip becomes the port-management
	 * function (PMF) */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* advertise DCC capabilities through the second shmem, if present */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif

	return 0;

	/* error unwind: each label releases what was acquired after the
	 * previous one */
#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
7676
7677static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7678{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007679 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007680 int rc;
7681
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007682 /* halt the connection */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007683 fp->state = BNX2X_FP_STATE_HALTING;
7684 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007685
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007686 /* Wait for completion */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007687 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007688 &(fp->state), 1);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007689 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007690 return rc;
7691
7692 /* delete cfc entry */
7693 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7694
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007695 /* Wait for completion */
7696 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007697 &(fp->state), 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007698 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007699}
7700
/*
 * Tear down the leading (default) connection: HALT it, then post the
 * PORT_DELETE ramrod and poll the default status block producer for its
 * completion. A PORT_DELETE timeout is logged but tolerated, since the
 * caller resets the chip afterwards anyway.
 *
 * Returns 0 on success, -EBUSY on a ramrod timeout.
 */
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	/* snapshot the producer so we can detect the PORT_DEL completion */
	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
7750
/*
 * Per-function reset: mask the function's IGU edges, stop the CNIC timer
 * scan (waiting for it to drain), and clear the function's ILT range.
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		/* scan-on bit clears once the scan has finished */
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
7779
/*
 * Per-port reset: mask the port's NIG interrupts, stop packet delivery
 * into the BRB (except MCP-destined traffic), mask the AEU attention
 * lines, then verify the BRB has drained.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* give in-flight traffic time to drain before checking occupancy */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
7805
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007806static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7807{
7808 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7809 BP_FUNC(bp), reset_code);
7810
7811 switch (reset_code) {
7812 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7813 bnx2x_reset_port(bp);
7814 bnx2x_reset_func(bp);
7815 bnx2x_reset_common(bp);
7816 break;
7817
7818 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7819 bnx2x_reset_port(bp);
7820 bnx2x_reset_func(bp);
7821 break;
7822
7823 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7824 bnx2x_reset_func(bp);
7825 break;
7826
7827 default:
7828 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7829 break;
7830 }
7831}
7832
/* must be called with rtnl_lock */
/* Bring the NIC down: stop RX/TX, flush the TX fastpath queues, tear
 * down MAC configuration, negotiate the reset scope with the MCP,
 * reset the chip, and free all driver resources.
 *
 * @unload_mode: UNLOAD_NORMAL or a WOL-preserving mode; selects which
 *               unload request code is sent to the MCP.
 * Returns 0 on success, -EBUSY (only with BNX2X_STOP_ON_ERROR) when a
 * queue or the leading connection cannot be stopped cleanly.
 */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

#ifdef BCM_CNIC
	/* tell the CNIC (iSCSI offload) driver we are going down */
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	/* keep the MCP pulse alive while we unload */
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* poll up to ~1000 x 1ms for the queue to drain */
		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				/* best effort: give up on this queue */
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		/* E1: clear the unicast MAC and invalidate the whole
		 * multicast CAM via a SET_MAC ramrod */
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		/* make the pending-count increment visible before posting */
		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		/* E1H: disable the LLH function and clear the MC hash */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	/* Pick the unload request code to send to the MCP based on the
	 * requested mode and WOL capability/configuration */
	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		/* emac_base is used implicitly by the EMAC_WR() macro */
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	/* Negotiate the actual reset scope: the MCP answers with the
	 * widest reset the last unloading driver may perform; without an
	 * MCP we emulate that decision with the local load counters */
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	/* bring the link down only when this port is going away */
	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
8016
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008017static void bnx2x_reset_task(struct work_struct *work)
8018{
8019 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8020
8021#ifdef BNX2X_STOP_ON_ERROR
8022 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8023 " so reset not done to allow debug dump,\n"
Joe Perchesad361c92009-07-06 13:05:40 -07008024 " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008025 return;
8026#endif
8027
8028 rtnl_lock();
8029
8030 if (!netif_running(bp->dev))
8031 goto reset_task_exit;
8032
8033 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8034 bnx2x_nic_load(bp, LOAD_NORMAL);
8035
8036reset_task_exit:
8037 rtnl_unlock();
8038}
8039
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008040/* end of nic load/unload */
8041
8042/* ethtool_ops */
8043
8044/*
8045 * Init service functions
8046 */
8047
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00008048static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8049{
8050 switch (func) {
8051 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8052 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8053 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8054 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8055 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8056 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8057 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8058 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8059 default:
8060 BNX2X_ERR("Unsupported function index: %d\n", func);
8061 return (u32)(-1);
8062 }
8063}
8064
/* Disable interrupts on an E1H chip by temporarily "pretending" to be
 * function 0 through the PGL pretend register, running the plain E1-style
 * interrupt disable, and then restoring the original function identity.
 * Each pretend write is read back and verified; a mismatch is fatal (BUG).
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
8097
/* Disable chip interrupts during UNDI takeover.  E1H parts need the
 * pretend-register variant; older E1 parts can be masked directly.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp)) {
		bnx2x_int_disable(bp);
		return;
	}

	bnx2x_undi_int_disable_e1h(bp, func);
}
8105
/* Probe-time takeover from a pre-OS UNDI (boot/PXE) driver.  If the chip
 * is flagged unprepared and the doorbell CID offset carries the UNDI
 * signature (0x7), request an unload from the MCP for both ports as
 * needed, quiesce ingress traffic, reset the device (preserving the NIG
 * port-swap strapping), and restore this function's MCP sequence state.
 * The UNDI hardware lock is held across the detection/unload window.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			/* mask interrupts using our real function id */
			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
8204
/* Read the port-independent hardware information at probe time: chip id,
 * single/dual-port strapping, flash size, shared-memory bases, BC (boot
 * code) version, feature flags and WOL capability.  Sets NO_MCP_FLAG and
 * returns early when no valid management firmware shared memory exists.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* detect single-port devices from the chip id / bond strapping
	 * (register 0x2874) -- NOTE(review): 0x55 pattern meaning taken
	 * from the strapping encoding, not visible here */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	/* a shmem base outside [0xA0000, 0xC0000) means no running MCP */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		/* WOL depends on PME-from-D3cold support in the PM cap */
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
8303
/* Build bp->port.supported (ethtool SUPPORTED_* bitmask) from the board's
 * switch configuration (1G SerDes vs 10G XGXS) and the external PHY type
 * from NVRAM, read the PHY MDIO address from the NIG, then prune the mask
 * by the NVRAM speed_cap_mask.  Returns early (leaving the mask as-is)
 * on an unrecognized switch config or external PHY type.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* SerDes PHY address lives in the NIG, 0x10 apart per port */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			/* NVRAM marks the PHY as failed: report and leave
			 * the supported mask untouched */
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* XGXS PHY address lives in the NIG, 0x18 apart per port */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
8540
/* Translate the NVRAM-requested link configuration (bp->port.link_config)
 * into the driver's requested line speed/duplex (bp->link_params) and the
 * ethtool advertising mask (bp->port.advertising).
 *
 * Each forced-speed case is accepted only if the corresponding capability
 * bit survived bnx2x_link_settings_supported(); on an invalid NVRAM
 * combination an error is logged and the function returns early, leaving
 * whatever defaults were already in place.  Also derives the requested
 * flow control from the same NVRAM word.
 */
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	/* default to full duplex; the half-duplex cases override this */
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	/* all three 10G PHY interface variants map to the same request */
	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		/* unknown speed selector: log and fall back to autoneg */
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	/* flow-control autoneg is meaningless without link autoneg */
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
8703
Michael Chane665bfd2009-10-10 13:46:54 +00008704static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8705{
8706 mac_hi = cpu_to_be16(mac_hi);
8707 mac_lo = cpu_to_be32(mac_lo);
8708 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8709 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8710}
8711
/* Read the per-port hardware configuration from shared memory (NVRAM
 * via the MCP) into bp->link_params / bp->port: lane config, external
 * PHY config (normalizing the BCM8727_NOC variant), speed capability
 * mask, per-lane XGXS RX/TX settings, WoL default, MDIO PHY address and
 * the port MAC address (plus the iSCSI MAC when CNIC is built in).
 * Must run before the link settings are derived; it calls
 * bnx2x_link_settings_supported() and bnx2x_link_settings_requested()
 * itself, in that order.
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		/* treat the NOC variant as a plain 8727 and remember the
		 * difference in a feature flag instead
		 */
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx
	 * (each 32-bit word packs two 16-bit lane values, high then low)
	 */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	/* port MAC address: upper 16 bits + lower 32 bits from shmem */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008803
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008804static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8805{
8806 int func = BP_FUNC(bp);
8807 u32 val, val2;
8808 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008809
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008810 bnx2x_get_common_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008811
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008812 bp->e1hov = 0;
8813 bp->e1hmf = 0;
8814 if (CHIP_IS_E1H(bp)) {
8815 bp->mf_config =
8816 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008817
Eilon Greenstein2691d512009-08-12 08:22:08 +00008818 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
Eilon Greenstein3196a882008-08-13 15:58:49 -07008819 FUNC_MF_CFG_E1HOV_TAG_MASK);
Eilon Greenstein2691d512009-08-12 08:22:08 +00008820 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008821 bp->e1hmf = 1;
Eilon Greenstein2691d512009-08-12 08:22:08 +00008822 BNX2X_DEV_INFO("%s function mode\n",
8823 IS_E1HMF(bp) ? "multi" : "single");
8824
8825 if (IS_E1HMF(bp)) {
8826 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8827 e1hov_tag) &
8828 FUNC_MF_CFG_E1HOV_TAG_MASK);
8829 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8830 bp->e1hov = val;
8831 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8832 "(0x%04x)\n",
8833 func, bp->e1hov, bp->e1hov);
8834 } else {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008835 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8836 " aborting\n", func);
8837 rc = -EPERM;
8838 }
Eilon Greenstein2691d512009-08-12 08:22:08 +00008839 } else {
8840 if (BP_E1HVN(bp)) {
8841 BNX2X_ERR("!!! VN %d in single function mode,"
8842 " aborting\n", BP_E1HVN(bp));
8843 rc = -EPERM;
8844 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008845 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008846 }
8847
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008848 if (!BP_NOMCP(bp)) {
8849 bnx2x_get_port_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008850
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008851 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8852 DRV_MSG_SEQ_NUMBER_MASK);
8853 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8854 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008855
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008856 if (IS_E1HMF(bp)) {
8857 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8858 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8859 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8860 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8861 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8862 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8863 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8864 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8865 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8866 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8867 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8868 ETH_ALEN);
8869 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8870 ETH_ALEN);
8871 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008872
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008873 return rc;
8874 }
8875
8876 if (BP_NOMCP(bp)) {
8877 /* only supposed to happen on emulation/FPGA */
Eilon Greenstein33471622008-08-13 15:59:08 -07008878 BNX2X_ERR("warning random MAC workaround active\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008879 random_ether_addr(bp->dev->dev_addr);
8880 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8881 }
8882
8883 return rc;
8884}
8885
/* One-time driver-state initialization at probe time: locks, work
 * items, HW info discovery, module-parameter-driven feature flags
 * (multi-queue, TPA, dropless FC), ring sizes, coalescing defaults and
 * the periodic timer.  Statement order matters: interrupts are masked
 * (intr_sem) before any work item exists, and HW info is read before
 * the UNDI unload decision.  Returns the bnx2x_get_hwinfo() status.
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	/* rc is propagated to the caller at the end of the function */
	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		/* RSS requires MSI-X; fall back to a single queue */
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	/* dropless flow control is not supported on E1 chips */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	/* the "poll" module parameter overrides the default timer period */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
8963
8964/*
8965 * ethtool service functions
8966 */
8967
8968/* All ethtool functions called with rtnl_lock */
8969
8970static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8971{
8972 struct bnx2x *bp = netdev_priv(dev);
8973
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008974 cmd->supported = bp->port.supported;
8975 cmd->advertising = bp->port.advertising;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008976
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07008977 if ((bp->state == BNX2X_STATE_OPEN) &&
8978 !(bp->flags & MF_FUNC_DIS) &&
8979 (bp->link_vars.link_up)) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008980 cmd->speed = bp->link_vars.line_speed;
8981 cmd->duplex = bp->link_vars.duplex;
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07008982 if (IS_E1HMF(bp)) {
8983 u16 vn_max_rate;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008984
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07008985 vn_max_rate =
8986 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008987 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07008988 if (vn_max_rate < cmd->speed)
8989 cmd->speed = vn_max_rate;
8990 }
8991 } else {
8992 cmd->speed = -1;
8993 cmd->duplex = -1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008994 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008995
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008996 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8997 u32 ext_phy_type =
8998 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
Eliezer Tamirf1410642008-02-28 11:51:50 -08008999
9000 switch (ext_phy_type) {
9001 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009002 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009003 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
Eilon Greenstein589abe32009-02-12 08:36:55 +00009004 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9005 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9006 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00009007 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009008 cmd->port = PORT_FIBRE;
9009 break;
9010
9011 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
Eilon Greenstein28577182009-02-12 08:37:00 +00009012 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009013 cmd->port = PORT_TP;
9014 break;
9015
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009016 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9017 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9018 bp->link_params.ext_phy_config);
9019 break;
9020
Eliezer Tamirf1410642008-02-28 11:51:50 -08009021 default:
9022 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009023 bp->link_params.ext_phy_config);
9024 break;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009025 }
9026 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009027 cmd->port = PORT_TP;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009028
Eilon Greenstein01cd4522009-08-12 08:23:08 +00009029 cmd->phy_address = bp->mdio.prtad;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009030 cmd->transceiver = XCVR_INTERNAL;
9031
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009032 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009033 cmd->autoneg = AUTONEG_ENABLE;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009034 else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009035 cmd->autoneg = AUTONEG_DISABLE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009036
9037 cmd->maxtxpkt = 0;
9038 cmd->maxrxpkt = 0;
9039
9040 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9041 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9042 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9043 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9044 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9045 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9046 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9047
9048 return 0;
9049}
9050
/* ethtool set_settings handler (called under rtnl_lock): validate the
 * requested autoneg/speed/duplex against the port capabilities and
 * store them in bp->link_params / bp->port.advertising, then re-run
 * link setup if the interface is up.
 *
 * Returns 0 on success or -EINVAL if the requested combination is not
 * supported by this port.  A no-op (returning 0) in multi-function
 * mode, where the link is shared and may not be reconfigured per
 * function.
 */
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported
		 * (note: this masks the caller's cmd->advertising in place)
		 */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported;
		 * each case rejects combinations the port cannot do
		 */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		/* 1G and above are full-duplex only */
		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	/* apply the new configuration immediately if the NIC is running */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
9201
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00009202#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9203#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9204
9205static int bnx2x_get_regs_len(struct net_device *dev)
9206{
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00009207 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein0d28e492009-08-12 08:23:40 +00009208 int regdump_len = 0;
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00009209 int i;
9210
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00009211 if (CHIP_IS_E1(bp)) {
9212 for (i = 0; i < REGS_COUNT; i++)
9213 if (IS_E1_ONLINE(reg_addrs[i].info))
9214 regdump_len += reg_addrs[i].size;
9215
9216 for (i = 0; i < WREGS_COUNT_E1; i++)
9217 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9218 regdump_len += wreg_addrs_e1[i].size *
9219 (1 + wreg_addrs_e1[i].read_regs_count);
9220
9221 } else { /* E1H */
9222 for (i = 0; i < REGS_COUNT; i++)
9223 if (IS_E1H_ONLINE(reg_addrs[i].info))
9224 regdump_len += reg_addrs[i].size;
9225
9226 for (i = 0; i < WREGS_COUNT_E1H; i++)
9227 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9228 regdump_len += wreg_addrs_e1h[i].size *
9229 (1 + wreg_addrs_e1h[i].read_regs_count);
9230 }
9231 regdump_len *= 4;
9232 regdump_len += sizeof(struct dump_hdr);
9233
9234 return regdump_len;
9235}
9236
/* Fill an ethtool register dump buffer: a dump_hdr followed by the raw
 * values of every register marked "online" for this chip revision.  The
 * buffer is regs->len bytes, as previously sized by bnx2x_get_regs_len().
 * If the interface is down the buffer is left zeroed.
 */
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	/* pre-zero so any space not written below reads back as 0 */
	memset(p, 0, regs->len);

	/* registers can only be read while the device is up */
	if (!netif_running(bp->dev))
		return;

	/* header records dump layout, signature and storm "waitp" state */
	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	/* hdr_size is in dwords minus one, hence the +1 */
	p += dump_hdr.hdr_size + 1;

	/* dump each online register block, one dword at a time */
	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
9276
Eilon Greenstein0d28e492009-08-12 08:23:40 +00009277#define PHY_FW_VER_LEN 10
9278
9279static void bnx2x_get_drvinfo(struct net_device *dev,
9280 struct ethtool_drvinfo *info)
9281{
9282 struct bnx2x *bp = netdev_priv(dev);
9283 u8 phy_fw_ver[PHY_FW_VER_LEN];
9284
9285 strcpy(info->driver, DRV_MODULE_NAME);
9286 strcpy(info->version, DRV_MODULE_VERSION);
9287
9288 phy_fw_ver[0] = '\0';
9289 if (bp->port.pmf) {
9290 bnx2x_acquire_phy_lock(bp);
9291 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9292 (bp->state != BNX2X_STATE_CLOSED),
9293 phy_fw_ver, PHY_FW_VER_LEN);
9294 bnx2x_release_phy_lock(bp);
9295 }
9296
9297 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9298 (bp->common.bc_ver & 0xff0000) >> 16,
9299 (bp->common.bc_ver & 0xff00) >> 8,
9300 (bp->common.bc_ver & 0xff),
9301 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9302 strcpy(info->bus_info, pci_name(bp->pdev));
9303 info->n_stats = BNX2X_NUM_STATS;
9304 info->testinfo_len = BNX2X_NUM_TESTS;
9305 info->eedump_len = bp->common.flash_size;
9306 info->regdump_len = bnx2x_get_regs_len(dev);
9307}
9308
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009309static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9310{
9311 struct bnx2x *bp = netdev_priv(dev);
9312
9313 if (bp->flags & NO_WOL_FLAG) {
9314 wol->supported = 0;
9315 wol->wolopts = 0;
9316 } else {
9317 wol->supported = WAKE_MAGIC;
9318 if (bp->wol)
9319 wol->wolopts = WAKE_MAGIC;
9320 else
9321 wol->wolopts = 0;
9322 }
9323 memset(&wol->sopass, 0, sizeof(wol->sopass));
9324}
9325
9326static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9327{
9328 struct bnx2x *bp = netdev_priv(dev);
9329
9330 if (wol->wolopts & ~WAKE_MAGIC)
9331 return -EINVAL;
9332
9333 if (wol->wolopts & WAKE_MAGIC) {
9334 if (bp->flags & NO_WOL_FLAG)
9335 return -EINVAL;
9336
9337 bp->wol = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009338 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009339 bp->wol = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009340
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009341 return 0;
9342}
9343
9344static u32 bnx2x_get_msglevel(struct net_device *dev)
9345{
9346 struct bnx2x *bp = netdev_priv(dev);
9347
9348 return bp->msglevel;
9349}
9350
9351static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9352{
9353 struct bnx2x *bp = netdev_priv(dev);
9354
9355 if (capable(CAP_NET_ADMIN))
9356 bp->msglevel = level;
9357}
9358
9359static int bnx2x_nway_reset(struct net_device *dev)
9360{
9361 struct bnx2x *bp = netdev_priv(dev);
9362
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009363 if (!bp->port.pmf)
9364 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009365
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009366 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009367 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009368 bnx2x_link_set(bp);
9369 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009370
9371 return 0;
9372}
9373
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009374static u32 bnx2x_get_link(struct net_device *dev)
Naohiro Ooiwa01e53292009-06-30 12:44:19 -07009375{
9376 struct bnx2x *bp = netdev_priv(dev);
9377
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07009378 if (bp->flags & MF_FUNC_DIS)
9379 return 0;
9380
Naohiro Ooiwa01e53292009-06-30 12:44:19 -07009381 return bp->link_vars.link_up;
9382}
9383
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009384static int bnx2x_get_eeprom_len(struct net_device *dev)
9385{
9386 struct bnx2x *bp = netdev_priv(dev);
9387
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009388 return bp->common.flash_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009389}
9390
9391static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9392{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009393 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009394 int count, i;
9395 u32 val = 0;
9396
9397 /* adjust timeout for emulation/FPGA */
9398 count = NVRAM_TIMEOUT_COUNT;
9399 if (CHIP_REV_IS_SLOW(bp))
9400 count *= 100;
9401
9402 /* request access to nvram interface */
9403 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9404 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9405
9406 for (i = 0; i < count*10; i++) {
9407 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9408 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9409 break;
9410
9411 udelay(5);
9412 }
9413
9414 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009415 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009416 return -EBUSY;
9417 }
9418
9419 return 0;
9420}
9421
9422static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9423{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009424 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009425 int count, i;
9426 u32 val = 0;
9427
9428 /* adjust timeout for emulation/FPGA */
9429 count = NVRAM_TIMEOUT_COUNT;
9430 if (CHIP_REV_IS_SLOW(bp))
9431 count *= 100;
9432
9433 /* relinquish nvram interface */
9434 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9435 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9436
9437 for (i = 0; i < count*10; i++) {
9438 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9439 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9440 break;
9441
9442 udelay(5);
9443 }
9444
9445 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009446 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009447 return -EBUSY;
9448 }
9449
9450 return 0;
9451}
9452
9453static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9454{
9455 u32 val;
9456
9457 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9458
9459 /* enable both bits, even on read */
9460 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9461 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9462 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9463}
9464
9465static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9466{
9467 u32 val;
9468
9469 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9470
9471 /* disable both bits, even after read */
9472 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9473 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9474 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9475}
9476
/* Issue a single-dword NVRAM read at @offset and busy-wait for completion.
 * @cmd_flags carries MCPR_NVM_COMMAND_FIRST/LAST to frame a multi-dword
 * sequence.  On success the value is stored big-endian in *ret_val (ethtool
 * presents NVRAM as a byte array).  Returns 0 on success or -EBUSY if the
 * controller never raises DONE within the timeout.
 */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion, polling DONE every 5us */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
9521
9522static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9523 int buf_size)
9524{
9525 int rc;
9526 u32 cmd_flags;
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00009527 __be32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009528
9529 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009530 DP(BNX2X_MSG_NVM,
Eliezer Tamirc14423f2008-02-28 11:49:42 -08009531 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009532 offset, buf_size);
9533 return -EINVAL;
9534 }
9535
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009536 if (offset + buf_size > bp->common.flash_size) {
9537 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009538 " buf_size (0x%x) > flash_size (0x%x)\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009539 offset, buf_size, bp->common.flash_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009540 return -EINVAL;
9541 }
9542
9543 /* request access to nvram interface */
9544 rc = bnx2x_acquire_nvram_lock(bp);
9545 if (rc)
9546 return rc;
9547
9548 /* enable access to nvram interface */
9549 bnx2x_enable_nvram_access(bp);
9550
9551 /* read the first word(s) */
9552 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9553 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9554 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9555 memcpy(ret_buf, &val, 4);
9556
9557 /* advance to the next dword */
9558 offset += sizeof(u32);
9559 ret_buf += sizeof(u32);
9560 buf_size -= sizeof(u32);
9561 cmd_flags = 0;
9562 }
9563
9564 if (rc == 0) {
9565 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9566 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9567 memcpy(ret_buf, &val, 4);
9568 }
9569
9570 /* disable access to nvram interface */
9571 bnx2x_disable_nvram_access(bp);
9572 bnx2x_release_nvram_lock(bp);
9573
9574 return rc;
9575}
9576
9577static int bnx2x_get_eeprom(struct net_device *dev,
9578 struct ethtool_eeprom *eeprom, u8 *eebuf)
9579{
9580 struct bnx2x *bp = netdev_priv(dev);
9581 int rc;
9582
Eilon Greenstein2add3ac2009-01-14 06:44:07 +00009583 if (!netif_running(dev))
9584 return -EAGAIN;
9585
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009586 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009587 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9588 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9589 eeprom->len, eeprom->len);
9590
9591 /* parameters already validated in ethtool_get_eeprom */
9592
9593 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9594
9595 return rc;
9596}
9597
/* Issue a single-dword NVRAM write of @val at @offset and busy-wait for
 * completion.  @cmd_flags carries MCPR_NVM_COMMAND_FIRST/LAST to frame a
 * multi-dword sequence.  Returns 0 on success or -EBUSY if the controller
 * never raises DONE within the timeout.
 */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion, polling DONE every 5us
	 * (val is reused here as a scratch register readback) */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
9637
/* Bit offset of a byte within its enclosing dword. */
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

/* Write a single byte (*data_buf) to NVRAM at @offset via a
 * read-modify-write of the enclosing aligned dword: read the dword, patch
 * the one byte, convert back to CPU order and write the dword back.
 * Used by the ethtool path for 1-byte writes.  Returns 0 or negative errno.
 *
 * NOTE(review): val is declared __be32 but is masked/shifted with
 * CPU-order arithmetic before the be32_to_cpu() round-trip; the two
 * conversions appear to cancel out on the untouched bytes, but confirm
 * the patched byte lands at the intended flash offset on both endiannesses.
 */
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* single-dword transaction: FIRST and LAST at once */
	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		/* patch the one target byte inside the dword */
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9685
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 * A 1-byte write (the ethtool single-byte case) is delegated to
 * bnx2x_nvram_write1(); otherwise offset and size must be dword-aligned
 * and within the flash.  The transfer is framed with
 * MCPR_NVM_COMMAND_FIRST/LAST, restarting the framing at each
 * NVRAM_PAGE_SIZE page boundary.  Returns 0 or a negative errno.
 */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* tag the final dword of the buffer, or of a flash page,
		 * with LAST; reopen the sequence with FIRST at the start
		 * of the next page */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9746
/* ethtool set_eeprom handler.  Besides plain NVRAM writes, several magic
 * values in eeprom->magic drive the SFX7101 external-PHY firmware upgrade
 * protocol: 'PHYP' prepares the PHY (link down, GPIO high), 'PHYR'
 * re-initializes the link after the upgrade, and a third magic finalizes
 * the upgrade (GPIO low, soft reset, HW reset).  Any other magic performs
 * a regular NVRAM write.  Returns 0 or a negative errno.
 */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed
		 * NOTE(review): 0x53985943 spells "S.YC", not 'PHYC'
		 * (0x50485943), and it falls outside the PMF-only range
		 * checked above — confirm the constant against the PHY FW
		 * upgrade tooling before changing it.
		 */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
				XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
9821
9822static int bnx2x_get_coalesce(struct net_device *dev,
9823 struct ethtool_coalesce *coal)
9824{
9825 struct bnx2x *bp = netdev_priv(dev);
9826
9827 memset(coal, 0, sizeof(struct ethtool_coalesce));
9828
9829 coal->rx_coalesce_usecs = bp->rx_ticks;
9830 coal->tx_coalesce_usecs = bp->tx_ticks;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009831
9832 return 0;
9833}
9834
Eilon Greensteinca003922009-08-12 22:53:28 -07009835#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009836static int bnx2x_set_coalesce(struct net_device *dev,
9837 struct ethtool_coalesce *coal)
9838{
9839 struct bnx2x *bp = netdev_priv(dev);
9840
9841 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -07009842 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9843 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009844
9845 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -07009846 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9847 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009848
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009849 if (netif_running(dev))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009850 bnx2x_update_coalesce(bp);
9851
9852 return 0;
9853}
9854
9855static void bnx2x_get_ringparam(struct net_device *dev,
9856 struct ethtool_ringparam *ering)
9857{
9858 struct bnx2x *bp = netdev_priv(dev);
9859
9860 ering->rx_max_pending = MAX_RX_AVAIL;
9861 ering->rx_mini_max_pending = 0;
9862 ering->rx_jumbo_max_pending = 0;
9863
9864 ering->rx_pending = bp->rx_ring_size;
9865 ering->rx_mini_pending = 0;
9866 ering->rx_jumbo_pending = 0;
9867
9868 ering->tx_max_pending = MAX_TX_AVAIL;
9869 ering->tx_pending = bp->tx_ring_size;
9870}
9871
9872static int bnx2x_set_ringparam(struct net_device *dev,
9873 struct ethtool_ringparam *ering)
9874{
9875 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009876 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009877
9878 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9879 (ering->tx_pending > MAX_TX_AVAIL) ||
9880 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9881 return -EINVAL;
9882
9883 bp->rx_ring_size = ering->rx_pending;
9884 bp->tx_ring_size = ering->tx_pending;
9885
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009886 if (netif_running(dev)) {
9887 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9888 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009889 }
9890
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009891 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009892}
9893
9894static void bnx2x_get_pauseparam(struct net_device *dev,
9895 struct ethtool_pauseparam *epause)
9896{
9897 struct bnx2x *bp = netdev_priv(dev);
9898
Eilon Greenstein356e2382009-02-12 08:38:32 +00009899 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9900 BNX2X_FLOW_CTRL_AUTO) &&
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009901 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9902
David S. Millerc0700f92008-12-16 23:53:20 -08009903 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9904 BNX2X_FLOW_CTRL_RX);
9905 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9906 BNX2X_FLOW_CTRL_TX);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009907
9908 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9909 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9910 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9911}
9912
9913static int bnx2x_set_pauseparam(struct net_device *dev,
9914 struct ethtool_pauseparam *epause)
9915{
9916 struct bnx2x *bp = netdev_priv(dev);
9917
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009918 if (IS_E1HMF(bp))
9919 return 0;
9920
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009921 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9922 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9923 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9924
David S. Millerc0700f92008-12-16 23:53:20 -08009925 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009926
9927 if (epause->rx_pause)
David S. Millerc0700f92008-12-16 23:53:20 -08009928 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009929
9930 if (epause->tx_pause)
David S. Millerc0700f92008-12-16 23:53:20 -08009931 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009932
David S. Millerc0700f92008-12-16 23:53:20 -08009933 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9934 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009935
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009936 if (epause->autoneg) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009937 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07009938 DP(NETIF_MSG_LINK, "autoneg not supported\n");
Eliezer Tamirf1410642008-02-28 11:51:50 -08009939 return -EINVAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009940 }
9941
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009942 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
David S. Millerc0700f92008-12-16 23:53:20 -08009943 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009944 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009945
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009946 DP(NETIF_MSG_LINK,
9947 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009948
9949 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009950 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009951 bnx2x_link_set(bp);
9952 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009953
9954 return 0;
9955}
9956
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009957static int bnx2x_set_flags(struct net_device *dev, u32 data)
9958{
9959 struct bnx2x *bp = netdev_priv(dev);
9960 int changed = 0;
9961 int rc = 0;
9962
9963 /* TPA requires Rx CSUM offloading */
9964 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9965 if (!(dev->features & NETIF_F_LRO)) {
9966 dev->features |= NETIF_F_LRO;
9967 bp->flags |= TPA_ENABLE_FLAG;
9968 changed = 1;
9969 }
9970
9971 } else if (dev->features & NETIF_F_LRO) {
9972 dev->features &= ~NETIF_F_LRO;
9973 bp->flags &= ~TPA_ENABLE_FLAG;
9974 changed = 1;
9975 }
9976
9977 if (changed && netif_running(dev)) {
9978 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9979 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9980 }
9981
9982 return rc;
9983}
9984
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009985static u32 bnx2x_get_rx_csum(struct net_device *dev)
9986{
9987 struct bnx2x *bp = netdev_priv(dev);
9988
9989 return bp->rx_csum;
9990}
9991
9992static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9993{
9994 struct bnx2x *bp = netdev_priv(dev);
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009995 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009996
9997 bp->rx_csum = data;
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009998
9999 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
10000 TPA'ed packets will be discarded due to wrong TCP CSUM */
10001 if (!data) {
10002 u32 flags = ethtool_op_get_flags(dev);
10003
10004 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10005 }
10006
10007 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010008}
10009
10010static int bnx2x_set_tso(struct net_device *dev, u32 data)
10011{
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010012 if (data) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010013 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010014 dev->features |= NETIF_F_TSO6;
10015 } else {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010016 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010017 dev->features &= ~NETIF_F_TSO6;
10018 }
10019
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010020 return 0;
10021}
10022
/* Names of the ethtool self-tests, reported via get_strings; the order
 * must match the test result slots filled by the self-test handler.
 * "offline" tests require taking the interface down.
 */
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
10034
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010035static int bnx2x_test_registers(struct bnx2x *bp)
10036{
10037 int idx, i, rc = -ENODEV;
10038 u32 wr_val = 0;
Yitchak Gertner9dabc422008-08-13 15:51:28 -070010039 int port = BP_PORT(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010040 static const struct {
10041 u32 offset0;
10042 u32 offset1;
10043 u32 mask;
10044 } reg_tbl[] = {
10045/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10046 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
10047 { HC_REG_AGG_INT_0, 4, 0x000003ff },
10048 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10049 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10050 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10051 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10052 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10053 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10054 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10055/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10056 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10057 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10058 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10059 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10060 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10061 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10062 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010063 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
Eilon Greensteinc1f1a062009-07-29 00:20:08 +000010064 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10065/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010066 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10067 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10068 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10069 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10070 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10071 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10072 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10073 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
Eilon Greensteinc1f1a062009-07-29 00:20:08 +000010074 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10075/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010076 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10077 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10078 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10079 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10080 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10081 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10082
10083 { 0xffffffff, 0, 0x00000000 }
10084 };
10085
10086 if (!netif_running(bp->dev))
10087 return rc;
10088
10089 /* Repeat the test twice:
10090 First by writing 0x00000000, second by writing 0xffffffff */
10091 for (idx = 0; idx < 2; idx++) {
10092
10093 switch (idx) {
10094 case 0:
10095 wr_val = 0;
10096 break;
10097 case 1:
10098 wr_val = 0xffffffff;
10099 break;
10100 }
10101
10102 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10103 u32 offset, mask, save_val, val;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010104
10105 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10106 mask = reg_tbl[i].mask;
10107
10108 save_val = REG_RD(bp, offset);
10109
10110 REG_WR(bp, offset, wr_val);
10111 val = REG_RD(bp, offset);
10112
10113 /* Restore the original register's value */
10114 REG_WR(bp, offset, save_val);
10115
10116 /* verify that value is as expected value */
10117 if ((val & mask) != (wr_val & mask))
10118 goto test_reg_exit;
10119 }
10120 }
10121
10122 rc = 0;
10123
10124test_reg_exit:
10125 return rc;
10126}
10127
10128static int bnx2x_test_memory(struct bnx2x *bp)
10129{
10130 int i, j, rc = -ENODEV;
10131 u32 val;
10132 static const struct {
10133 u32 offset;
10134 int size;
10135 } mem_tbl[] = {
10136 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10137 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10138 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10139 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10140 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10141 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10142 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10143
10144 { 0xffffffff, 0 }
10145 };
10146 static const struct {
10147 char *name;
10148 u32 offset;
Yitchak Gertner9dabc422008-08-13 15:51:28 -070010149 u32 e1_mask;
10150 u32 e1h_mask;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010151 } prty_tbl[] = {
Yitchak Gertner9dabc422008-08-13 15:51:28 -070010152 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10153 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10154 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10155 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10156 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10157 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010158
Yitchak Gertner9dabc422008-08-13 15:51:28 -070010159 { NULL, 0xffffffff, 0, 0 }
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010160 };
10161
10162 if (!netif_running(bp->dev))
10163 return rc;
10164
10165 /* Go through all the memories */
10166 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10167 for (j = 0; j < mem_tbl[i].size; j++)
10168 REG_RD(bp, mem_tbl[i].offset + j*4);
10169
10170 /* Check the parity status */
10171 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10172 val = REG_RD(bp, prty_tbl[i].offset);
Yitchak Gertner9dabc422008-08-13 15:51:28 -070010173 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10174 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010175 DP(NETIF_MSG_HW,
10176 "%s is 0x%x\n", prty_tbl[i].name, val);
10177 goto test_mem_exit;
10178 }
10179 }
10180
10181 rc = 0;
10182
10183test_mem_exit:
10184 return rc;
10185}
10186
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010187static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10188{
10189 int cnt = 1000;
10190
10191 if (link_up)
10192 while (bnx2x_link_test(bp) && cnt--)
10193 msleep(10);
10194}
10195
/* Send a single self-addressed packet on queue 0 with the MAC or PHY in
 * loopback and verify that exactly one packet comes back on the RX side
 * with an intact payload.
 *
 * @bp:		driver instance
 * @loopback_mode: BNX2X_PHY_LOOPBACK or BNX2X_MAC_LOOPBACK
 * @link_up:	unused here beyond the signature (kept for the caller's
 *		symmetry with bnx2x_wait_for_link) -- NOTE(review): confirm
 *
 * Returns 0 on success, -EINVAL for a bad/unsupported mode, -ENOMEM when
 * the skb allocation fails, -ENODEV on any TX/RX mismatch.
 *
 * NOTE: the statement ordering around wmb()/barrier()/DOORBELL/mmiowb()
 * is required for correct producer/doorbell visibility -- do not reorder.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	/* both directions use fastpath queue 0 */
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		/* PHY loopback relies on the mode already configured by
		 * the diag load path; anything else cannot be tested */
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		/* reconfigure the link for BMAC-internal loopback */
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet: dest MAC = own address, src MAC
	 * zeroed, rest of the header 0x77, payload byte i = (i & 0xff) */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	/* snapshot the TX/RX consumer indices so completion is detectable */
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	/* build the start BD: DMA address, length, flags */
	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	/* BDs must be visible in memory before the doorbell is rung */
	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */
	bp->dev->trans_start = jiffies;

	/* give the packet time to loop through the device */
	udelay(100);

	/* exactly one TX completion must have been posted */
	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* ...and exactly one RX completion */
	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* the CQE must be a fast-path completion with no error flags */
	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* verify the payload survived the round trip byte-for-byte */
	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* consume the RX entry so the rings stay consistent */
	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	/* always restore the normal (non-loopback) link configuration */
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
10331
10332static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10333{
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010334 int rc = 0, res;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010335
10336 if (!netif_running(bp->dev))
10337 return BNX2X_LOOPBACK_FAILED;
10338
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070010339 bnx2x_netif_stop(bp, 1);
Eilon Greenstein3910c8a2009-01-22 06:01:32 +000010340 bnx2x_acquire_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010341
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010342 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10343 if (res) {
10344 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10345 rc |= BNX2X_PHY_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010346 }
10347
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010348 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10349 if (res) {
10350 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10351 rc |= BNX2X_MAC_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010352 }
10353
Eilon Greenstein3910c8a2009-01-22 06:01:32 +000010354 bnx2x_release_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010355 bnx2x_netif_start(bp);
10356
10357 return rc;
10358}
10359
10360#define CRC32_RESIDUAL 0xdebb20e3
10361
10362static int bnx2x_test_nvram(struct bnx2x *bp)
10363{
10364 static const struct {
10365 int offset;
10366 int size;
10367 } nvram_tbl[] = {
10368 { 0, 0x14 }, /* bootstrap */
10369 { 0x14, 0xec }, /* dir */
10370 { 0x100, 0x350 }, /* manuf_info */
10371 { 0x450, 0xf0 }, /* feature_info */
10372 { 0x640, 0x64 }, /* upgrade_key_info */
10373 { 0x6a4, 0x64 },
10374 { 0x708, 0x70 }, /* manuf_key_info */
10375 { 0x778, 0x70 },
10376 { 0, 0 }
10377 };
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010378 __be32 buf[0x350 / 4];
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010379 u8 *data = (u8 *)buf;
10380 int i, rc;
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000010381 u32 magic, crc;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010382
10383 rc = bnx2x_nvram_read(bp, 0, data, 4);
10384 if (rc) {
Eilon Greensteinf5372252009-02-12 08:38:30 +000010385 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010386 goto test_nvram_exit;
10387 }
10388
10389 magic = be32_to_cpu(buf[0]);
10390 if (magic != 0x669955aa) {
10391 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10392 rc = -ENODEV;
10393 goto test_nvram_exit;
10394 }
10395
10396 for (i = 0; nvram_tbl[i].size; i++) {
10397
10398 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10399 nvram_tbl[i].size);
10400 if (rc) {
10401 DP(NETIF_MSG_PROBE,
Eilon Greensteinf5372252009-02-12 08:38:30 +000010402 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010403 goto test_nvram_exit;
10404 }
10405
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000010406 crc = ether_crc_le(nvram_tbl[i].size, data);
10407 if (crc != CRC32_RESIDUAL) {
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010408 DP(NETIF_MSG_PROBE,
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000010409 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010410 rc = -ENODEV;
10411 goto test_nvram_exit;
10412 }
10413 }
10414
10415test_nvram_exit:
10416 return rc;
10417}
10418
/* Exercise the slowpath interrupt by posting an (empty) SET_MAC ramrod
 * and waiting for its completion to clear set_mac_pending.
 *
 * Returns 0 when the completion arrives within ~100 ms, -ENODEV when the
 * interface is down or the completion never shows up, or the
 * bnx2x_sp_post() error code.
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* zero-length command: nothing is actually configured */
	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* mark a MAC update in flight before posting; the write barrier
	 * orders the flag update against the ramrod post */
	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		/* poll up to 10 x 10 ms for the completion handler to
		 * clear set_mac_pending */
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
10453
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010454static void bnx2x_self_test(struct net_device *dev,
10455 struct ethtool_test *etest, u64 *buf)
10456{
10457 struct bnx2x *bp = netdev_priv(dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010458
10459 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10460
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010461 if (!netif_running(dev))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010462 return;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010463
Eilon Greenstein33471622008-08-13 15:59:08 -070010464 /* offline tests are not supported in MF mode */
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010465 if (IS_E1HMF(bp))
10466 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10467
10468 if (etest->flags & ETH_TEST_FL_OFFLINE) {
Eilon Greenstein279abdf2009-07-21 05:47:22 +000010469 int port = BP_PORT(bp);
10470 u32 val;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010471 u8 link_up;
10472
Eilon Greenstein279abdf2009-07-21 05:47:22 +000010473 /* save current value of input enable for TX port IF */
10474 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10475 /* disable input for TX port IF */
10476 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10477
Eilon Greenstein061bc702009-10-15 00:18:47 -070010478 link_up = (bnx2x_link_test(bp) == 0);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010479 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10480 bnx2x_nic_load(bp, LOAD_DIAG);
10481 /* wait until link state is restored */
10482 bnx2x_wait_for_link(bp, link_up);
10483
10484 if (bnx2x_test_registers(bp) != 0) {
10485 buf[0] = 1;
10486 etest->flags |= ETH_TEST_FL_FAILED;
10487 }
10488 if (bnx2x_test_memory(bp) != 0) {
10489 buf[1] = 1;
10490 etest->flags |= ETH_TEST_FL_FAILED;
10491 }
10492 buf[2] = bnx2x_test_loopback(bp, link_up);
10493 if (buf[2] != 0)
10494 etest->flags |= ETH_TEST_FL_FAILED;
10495
10496 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
Eilon Greenstein279abdf2009-07-21 05:47:22 +000010497
10498 /* restore input for TX port IF */
10499 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10500
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010501 bnx2x_nic_load(bp, LOAD_NORMAL);
10502 /* wait until link state is restored */
10503 bnx2x_wait_for_link(bp, link_up);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010504 }
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010505 if (bnx2x_test_nvram(bp) != 0) {
10506 buf[3] = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010507 etest->flags |= ETH_TEST_FL_FAILED;
10508 }
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010509 if (bnx2x_test_intr(bp) != 0) {
10510 buf[4] = 1;
10511 etest->flags |= ETH_TEST_FL_FAILED;
10512 }
10513 if (bp->port.pmf)
10514 if (bnx2x_link_test(bp) != 0) {
10515 buf[5] = 1;
10516 etest->flags |= ETH_TEST_FL_FAILED;
10517 }
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010518
10519#ifdef BNX2X_EXTRA_DEBUG
10520 bnx2x_panic_dump(bp);
10521#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010522}
10523
/* Per-queue ethtool statistics: offset (in 32-bit words) into
 * bnx2x_fastpath.eth_q_stats, counter width in bytes (4 or 8; 0 would
 * mean "skip"), and the name template ("%d" is replaced by the queue
 * index in bnx2x_get_strings()). */
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};
10549
/* Global ethtool statistics: offset (in 32-bit words) into bp->eth_stats,
 * counter width in bytes (4 or 8), a flag telling whether the counter is
 * per-port, per-function or both (used to filter port stats in E1H MF
 * mode), and the ethtool string. */
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
10643
/* Classify bnx2x_stats_arr entries by their flags field */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
/* In E1H multi-function mode, port stats are hidden unless the
 * BNX2X_MSG_STATS debug level is set */
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
Yitchak Gertner66e855f2008-08-13 15:49:05 -070010649
Ben Hutchings15f0a392009-10-01 11:58:24 +000010650static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10651{
10652 struct bnx2x *bp = netdev_priv(dev);
10653 int i, num_stats;
10654
10655 switch(stringset) {
10656 case ETH_SS_STATS:
10657 if (is_multi(bp)) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010658 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
Ben Hutchings15f0a392009-10-01 11:58:24 +000010659 if (!IS_E1HMF_MODE_STAT(bp))
10660 num_stats += BNX2X_NUM_STATS;
10661 } else {
10662 if (IS_E1HMF_MODE_STAT(bp)) {
10663 num_stats = 0;
10664 for (i = 0; i < BNX2X_NUM_STATS; i++)
10665 if (IS_FUNC_STAT(i))
10666 num_stats++;
10667 } else
10668 num_stats = BNX2X_NUM_STATS;
10669 }
10670 return num_stats;
10671
10672 case ETH_SS_TEST:
10673 return BNX2X_NUM_TESTS;
10674
10675 default:
10676 return -EINVAL;
10677 }
10678}
10679
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010680static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10681{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010682 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +000010683 int i, j, k;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010684
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010685 switch (stringset) {
10686 case ETH_SS_STATS:
Eilon Greensteinde832a52009-02-12 08:36:33 +000010687 if (is_multi(bp)) {
10688 k = 0;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010689 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +000010690 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10691 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10692 bnx2x_q_stats_arr[j].string, i);
10693 k += BNX2X_NUM_Q_STATS;
10694 }
10695 if (IS_E1HMF_MODE_STAT(bp))
10696 break;
10697 for (j = 0; j < BNX2X_NUM_STATS; j++)
10698 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10699 bnx2x_stats_arr[j].string);
10700 } else {
10701 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10702 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10703 continue;
10704 strcpy(buf + j*ETH_GSTRING_LEN,
10705 bnx2x_stats_arr[i].string);
10706 j++;
10707 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010708 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010709 break;
10710
10711 case ETH_SS_TEST:
10712 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10713 break;
10714 }
10715}
10716
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010717static void bnx2x_get_ethtool_stats(struct net_device *dev,
10718 struct ethtool_stats *stats, u64 *buf)
10719{
10720 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +000010721 u32 *hw_stats, *offset;
10722 int i, j, k;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010723
Eilon Greensteinde832a52009-02-12 08:36:33 +000010724 if (is_multi(bp)) {
10725 k = 0;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010726 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +000010727 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10728 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10729 if (bnx2x_q_stats_arr[j].size == 0) {
10730 /* skip this counter */
10731 buf[k + j] = 0;
10732 continue;
10733 }
10734 offset = (hw_stats +
10735 bnx2x_q_stats_arr[j].offset);
10736 if (bnx2x_q_stats_arr[j].size == 4) {
10737 /* 4-byte counter */
10738 buf[k + j] = (u64) *offset;
10739 continue;
10740 }
10741 /* 8-byte counter */
10742 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10743 }
10744 k += BNX2X_NUM_Q_STATS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010745 }
Eilon Greensteinde832a52009-02-12 08:36:33 +000010746 if (IS_E1HMF_MODE_STAT(bp))
10747 return;
10748 hw_stats = (u32 *)&bp->eth_stats;
10749 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10750 if (bnx2x_stats_arr[j].size == 0) {
10751 /* skip this counter */
10752 buf[k + j] = 0;
10753 continue;
10754 }
10755 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10756 if (bnx2x_stats_arr[j].size == 4) {
10757 /* 4-byte counter */
10758 buf[k + j] = (u64) *offset;
10759 continue;
10760 }
10761 /* 8-byte counter */
10762 buf[k + j] = HILO_U64(*offset, *(offset + 1));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010763 }
Eilon Greensteinde832a52009-02-12 08:36:33 +000010764 } else {
10765 hw_stats = (u32 *)&bp->eth_stats;
10766 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10767 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10768 continue;
10769 if (bnx2x_stats_arr[i].size == 0) {
10770 /* skip this counter */
10771 buf[j] = 0;
10772 j++;
10773 continue;
10774 }
10775 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10776 if (bnx2x_stats_arr[i].size == 4) {
10777 /* 4-byte counter */
10778 buf[j] = (u64) *offset;
10779 j++;
10780 continue;
10781 }
10782 /* 8-byte counter */
10783 buf[j] = HILO_U64(*offset, *(offset + 1));
10784 j++;
10785 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010786 }
10787}
10788
10789static int bnx2x_phys_id(struct net_device *dev, u32 data)
10790{
10791 struct bnx2x *bp = netdev_priv(dev);
10792 int i;
10793
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010794 if (!netif_running(dev))
10795 return 0;
10796
10797 if (!bp->port.pmf)
10798 return 0;
10799
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010800 if (data == 0)
10801 data = 2;
10802
10803 for (i = 0; i < (data * 2); i++) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010804 if ((i % 2) == 0)
Yaniv Rosner7846e472009-11-05 19:18:07 +020010805 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10806 SPEED_1000);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010807 else
Yaniv Rosner7846e472009-11-05 19:18:07 +020010808 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010809
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010810 msleep_interruptible(500);
10811 if (signal_pending(current))
10812 break;
10813 }
10814
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010815 if (bp->link_vars.link_up)
Yaniv Rosner7846e472009-11-05 19:18:07 +020010816 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10817 bp->link_vars.line_speed);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010818
10819 return 0;
10820}
10821
Stephen Hemminger0fc0b732009-09-02 01:03:33 -070010822static const struct ethtool_ops bnx2x_ethtool_ops = {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010823 .get_settings = bnx2x_get_settings,
10824 .set_settings = bnx2x_set_settings,
10825 .get_drvinfo = bnx2x_get_drvinfo,
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000010826 .get_regs_len = bnx2x_get_regs_len,
10827 .get_regs = bnx2x_get_regs,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010828 .get_wol = bnx2x_get_wol,
10829 .set_wol = bnx2x_set_wol,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010830 .get_msglevel = bnx2x_get_msglevel,
10831 .set_msglevel = bnx2x_set_msglevel,
10832 .nway_reset = bnx2x_nway_reset,
Naohiro Ooiwa01e53292009-06-30 12:44:19 -070010833 .get_link = bnx2x_get_link,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010834 .get_eeprom_len = bnx2x_get_eeprom_len,
10835 .get_eeprom = bnx2x_get_eeprom,
10836 .set_eeprom = bnx2x_set_eeprom,
10837 .get_coalesce = bnx2x_get_coalesce,
10838 .set_coalesce = bnx2x_set_coalesce,
10839 .get_ringparam = bnx2x_get_ringparam,
10840 .set_ringparam = bnx2x_set_ringparam,
10841 .get_pauseparam = bnx2x_get_pauseparam,
10842 .set_pauseparam = bnx2x_set_pauseparam,
10843 .get_rx_csum = bnx2x_get_rx_csum,
10844 .set_rx_csum = bnx2x_set_rx_csum,
10845 .get_tx_csum = ethtool_op_get_tx_csum,
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010846 .set_tx_csum = ethtool_op_set_tx_hw_csum,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010847 .set_flags = bnx2x_set_flags,
10848 .get_flags = ethtool_op_get_flags,
10849 .get_sg = ethtool_op_get_sg,
10850 .set_sg = ethtool_op_set_sg,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010851 .get_tso = ethtool_op_get_tso,
10852 .set_tso = bnx2x_set_tso,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010853 .self_test = bnx2x_self_test,
Ben Hutchings15f0a392009-10-01 11:58:24 +000010854 .get_sset_count = bnx2x_get_sset_count,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010855 .get_strings = bnx2x_get_strings,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010856 .phys_id = bnx2x_phys_id,
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010857 .get_ethtool_stats = bnx2x_get_ethtool_stats,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010858};
10859
10860/* end of ethtool_ops */
10861
10862/****************************************************************************
10863* General service functions
10864****************************************************************************/
10865
/* Move the device between PCI power states through the PM control
 * register (PM_CTRL at bp->pm_cap).
 *
 * Only PCI_D0 and PCI_D3hot are supported; any other target state
 * returns -EINVAL.  Returns 0 on success.
 */
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		/* clear the power-state field (-> D0); PME_STATUS is
		 * written back as-is (RW1C per the PCI PM spec, so this
		 * presumably also clears a pending PME - confirm)
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;	/* 3 = D3hot encoding of the state field */

		/* arm PME so Wake-on-LAN can wake us from D3hot */
		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
10903
Eilon Greenstein237907c2009-01-14 06:42:44 +000010904static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10905{
10906 u16 rx_cons_sb;
10907
10908 /* Tell compiler that status block fields can change */
10909 barrier();
10910 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10911 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10912 rx_cons_sb++;
10913 return (fp->rx_comp_cons != rx_cons_sb);
10914}
10915
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010916/*
10917 * net_device service functions
10918 */
10919
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010920static int bnx2x_poll(struct napi_struct *napi, int budget)
10921{
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010922 int work_done = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010923 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10924 napi);
10925 struct bnx2x *bp = fp->bp;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010926
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010927 while (1) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010928#ifdef BNX2X_STOP_ON_ERROR
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010929 if (unlikely(bp->panic)) {
10930 napi_complete(napi);
10931 return 0;
10932 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010933#endif
10934
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010935 if (bnx2x_has_tx_work(fp))
10936 bnx2x_tx_int(fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010937
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010938 if (bnx2x_has_rx_work(fp)) {
10939 work_done += bnx2x_rx_int(fp, budget - work_done);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010940
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010941 /* must not complete if we consumed full budget */
10942 if (work_done >= budget)
10943 break;
10944 }
Eilon Greenstein356e2382009-02-12 08:38:32 +000010945
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010946 /* Fall out from the NAPI loop if needed */
10947 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10948 bnx2x_update_fpsb_idx(fp);
10949 /* bnx2x_has_rx_work() reads the status block, thus we need
10950 * to ensure that status block indices have been actually read
10951 * (bnx2x_update_fpsb_idx) prior to this check
10952 * (bnx2x_has_rx_work) so that we won't write the "newer"
10953 * value of the status block to IGU (if there was a DMA right
10954 * after bnx2x_has_rx_work and if there is no rmb, the memory
10955 * reading (bnx2x_update_fpsb_idx) may be postponed to right
10956 * before bnx2x_ack_sb). In this case there will never be
10957 * another interrupt until there is another update of the
10958 * status block, while there is still unhandled work.
10959 */
10960 rmb();
10961
10962 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10963 napi_complete(napi);
10964 /* Re-enable interrupts */
10965 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10966 le16_to_cpu(fp->fp_c_idx),
10967 IGU_INT_NOP, 1);
10968 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10969 le16_to_cpu(fp->fp_u_idx),
10970 IGU_INT_ENABLE, 1);
10971 break;
10972 }
10973 }
Eilon Greenstein8534f322009-03-02 07:59:45 +000010974 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010975
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010976 return work_done;
10977}
10978
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010979
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 *
 * The start BD is truncated to cover only the first 'hlen' bytes
 * (the headers) and a new data BD - placed after the parsing BD - is
 * filled to cover the remainder of the SAME DMA mapping (address + hlen,
 * old length - hlen).  BNX2X_TSO_SPLIT_BD is set on the sw_tx_bd so the
 * completion path knows the extra BD has no mapping of its own.
 * *tx_bd is advanced to the new data BD; returns the advanced bd_prod.
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	/* data starts 'hlen' bytes into the shared header mapping */
	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
11029
11030static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11031{
11032 if (fix > 0)
11033 csum = (u16) ~csum_fold(csum_sub(csum,
11034 csum_partial(t_header - fix, fix, 0)));
11035
11036 else if (fix < 0)
11037 csum = (u16) ~csum_fold(csum_add(csum,
11038 csum_partial(t_header, -fix, 0)));
11039
11040 return swab16(csum);
11041}
11042
11043static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11044{
11045 u32 rc;
11046
11047 if (skb->ip_summed != CHECKSUM_PARTIAL)
11048 rc = XMIT_PLAIN;
11049
11050 else {
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000011051 if (skb->protocol == htons(ETH_P_IPV6)) {
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011052 rc = XMIT_CSUM_V6;
11053 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11054 rc |= XMIT_CSUM_TCP;
11055
11056 } else {
11057 rc = XMIT_CSUM_V4;
11058 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11059 rc |= XMIT_CSUM_TCP;
11060 }
11061 }
11062
11063 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
Eilon Greensteind6a2f982009-11-09 06:09:22 +000011064 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011065
11066 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
Eilon Greensteind6a2f982009-11-09 06:09:22 +000011067 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011068
11069 return rc;
11070}
11071
Eilon Greenstein632da4d2009-01-14 06:44:10 +000011072#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
Eilon Greensteinf5372252009-02-12 08:38:30 +000011073/* check if packet requires linearization (packet is too fragmented)
11074 no need to check fragmentation if page size > 8K (there will be no
11075 violation to FW restrictions) */
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011076static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11077 u32 xmit_type)
11078{
11079 int to_copy = 0;
11080 int hlen = 0;
11081 int first_bd_sz = 0;
11082
11083 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11084 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11085
11086 if (xmit_type & XMIT_GSO) {
11087 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11088 /* Check if LSO packet needs to be copied:
11089 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11090 int wnd_size = MAX_FETCH_BD - 3;
Eilon Greenstein33471622008-08-13 15:59:08 -070011091 /* Number of windows to check */
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011092 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11093 int wnd_idx = 0;
11094 int frag_idx = 0;
11095 u32 wnd_sum = 0;
11096
11097 /* Headers length */
11098 hlen = (int)(skb_transport_header(skb) - skb->data) +
11099 tcp_hdrlen(skb);
11100
11101 /* Amount of data (w/o headers) on linear part of SKB*/
11102 first_bd_sz = skb_headlen(skb) - hlen;
11103
11104 wnd_sum = first_bd_sz;
11105
11106 /* Calculate the first sum - it's special */
11107 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11108 wnd_sum +=
11109 skb_shinfo(skb)->frags[frag_idx].size;
11110
11111 /* If there was data on linear skb data - check it */
11112 if (first_bd_sz > 0) {
11113 if (unlikely(wnd_sum < lso_mss)) {
11114 to_copy = 1;
11115 goto exit_lbl;
11116 }
11117
11118 wnd_sum -= first_bd_sz;
11119 }
11120
11121 /* Others are easier: run through the frag list and
11122 check all windows */
11123 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11124 wnd_sum +=
11125 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11126
11127 if (unlikely(wnd_sum < lso_mss)) {
11128 to_copy = 1;
11129 break;
11130 }
11131 wnd_sum -=
11132 skb_shinfo(skb)->frags[wnd_idx].size;
11133 }
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011134 } else {
11135 /* in non-LSO too fragmented packet should always
11136 be linearized */
11137 to_copy = 1;
11138 }
11139 }
11140
11141exit_lbl:
11142 if (unlikely(to_copy))
11143 DP(NETIF_MSG_TX_QUEUED,
11144 "Linearization IS REQUIRED for %s packet. "
11145 "num_frags %d hlen %d first_bd_sz %d\n",
11146 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11147 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11148
11149 return to_copy;
11150}
Eilon Greenstein632da4d2009-01-14 06:44:10 +000011151#endif
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011152
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 *
 * Main transmit entry point (.ndo_start_xmit).  Builds the BD chain for
 * one skb on the fastpath selected by the skb queue mapping:
 *   start BD -> parsing BD (TSO/checksum info) -> data BDs (one per
 *   fragment, plus an optional header/data split for TSO), then rings
 * the doorbell.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring
 * unexpectedly has no room.
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	/* the stack's queue mapping selects our fastpath/txq pair */
	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	/* worst case: one BD per fragment + 3 (linear data, PBD, last) */
	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	/* hardware VLAN tag insertion when a tag is present and enabled */
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		/* NOTE: header lengths in the parsing BD are in 16-bit
		 * words (hence the /2); hlen is converted back to bytes
		 * below for the TSO split
		 */
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	/* map the linear part of the skb for the start BD */
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		/* if payload shares the first BD with headers, split it
		 * into a header BD and a data BD (consumes one extra BD)
		 */
		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		/* pseudo-header checksum without the length, as the FW
		 * fills in per-segment lengths
		 */
		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* one data BD per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	/* stop the queue if a maximally-fragmented skb no longer fits */
	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp->eth_q_stats.driver_xoff++;
		/* re-check: completions may have freed room meanwhile */
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
11430
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011431/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011432static int bnx2x_open(struct net_device *dev)
11433{
11434 struct bnx2x *bp = netdev_priv(dev);
11435
Eilon Greenstein6eccabb2009-01-22 03:37:48 +000011436 netif_carrier_off(dev);
11437
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011438 bnx2x_set_power_state(bp, PCI_D0);
11439
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011440 return bnx2x_nic_load(bp, LOAD_OPEN);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011441}
11442
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011443/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011444static int bnx2x_close(struct net_device *dev)
11445{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011446 struct bnx2x *bp = netdev_priv(dev);
11447
11448 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011449 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11450 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11451 if (!CHIP_REV_IS_SLOW(bp))
11452 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011453
11454 return 0;
11455}
11456
/* called with netif_tx_lock from dev_mcast.c */
/*
 * Program the device Rx filtering according to dev->flags and the
 * multicast list.  E1 chips get explicit CAM entries via the SET_MAC
 * ramrod; E1H chips use a 256-bit multicast hash filter in registers.
 */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* Filters can only be programmed while the chip is fully up */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	/* On E1 the multicast list may not fit in the CAM
	   (BNX2X_MAX_MULTICAST entries) - fall back to all-multi then */
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			/* Build one CAM entry per multicast address */
			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			/* Invalidate entries left over from a previous,
			   longer multicast list */
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			/* Mark the ramrod as pending before posting it; the
			   barrier orders the flag vs. the doorbell */
			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				/* Top 8 bits of the CRC select one of the
				   256 filter bits (32-bit reg + bit index) */
				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
11580
11581/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011582static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11583{
11584 struct sockaddr *addr = p;
11585 struct bnx2x *bp = netdev_priv(dev);
11586
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011587 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011588 return -EINVAL;
11589
11590 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011591 if (netif_running(dev)) {
11592 if (CHIP_IS_E1(bp))
Michael Chane665bfd2009-10-10 13:46:54 +000011593 bnx2x_set_eth_mac_addr_e1(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011594 else
Michael Chane665bfd2009-10-10 13:46:54 +000011595 bnx2x_set_eth_mac_addr_e1h(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011596 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011597
11598 return 0;
11599}
11600
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011601/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011602static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11603 int devad, u16 addr)
11604{
11605 struct bnx2x *bp = netdev_priv(netdev);
11606 u16 value;
11607 int rc;
11608 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11609
11610 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11611 prtad, devad, addr);
11612
11613 if (prtad != bp->mdio.prtad) {
11614 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11615 prtad, bp->mdio.prtad);
11616 return -EINVAL;
11617 }
11618
11619 /* The HW expects different devad if CL22 is used */
11620 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11621
11622 bnx2x_acquire_phy_lock(bp);
11623 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11624 devad, addr, &value);
11625 bnx2x_release_phy_lock(bp);
11626 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11627
11628 if (!rc)
11629 rc = value;
11630 return rc;
11631}
11632
11633/* called with rtnl_lock */
11634static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11635 u16 addr, u16 value)
11636{
11637 struct bnx2x *bp = netdev_priv(netdev);
11638 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11639 int rc;
11640
11641 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11642 " value 0x%x\n", prtad, devad, addr, value);
11643
11644 if (prtad != bp->mdio.prtad) {
11645 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11646 prtad, bp->mdio.prtad);
11647 return -EINVAL;
11648 }
11649
11650 /* The HW expects different devad if CL22 is used */
11651 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11652
11653 bnx2x_acquire_phy_lock(bp);
11654 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11655 devad, addr, value);
11656 bnx2x_release_phy_lock(bp);
11657 return rc;
11658}
11659
11660/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011661static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11662{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011663 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011664 struct mii_ioctl_data *mdio = if_mii(ifr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011665
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011666 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11667 mdio->phy_id, mdio->reg_num, mdio->val_in);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011668
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011669 if (!netif_running(dev))
11670 return -EAGAIN;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011671
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011672 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011673}
11674
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011675/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011676static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11677{
11678 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011679 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011680
11681 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11682 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11683 return -EINVAL;
11684
11685 /* This does not race with packet allocation
Eliezer Tamirc14423f2008-02-28 11:49:42 -080011686 * because the actual alloc size is
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011687 * only updated as part of load
11688 */
11689 dev->mtu = new_mtu;
11690
11691 if (netif_running(dev)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011692 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11693 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011694 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011695
11696 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011697}
11698
/* ndo_tx_timeout callback: the stack detected a stuck Tx queue.
 * The actual recovery (reset) is deferred to process context via
 * bp->reset_task.
 */
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	/* Debug builds freeze the chip state for inspection instead of
	   recovering silently */
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
11710
11711#ifdef BCM_VLAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011712/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011713static void bnx2x_vlan_rx_register(struct net_device *dev,
11714 struct vlan_group *vlgrp)
11715{
11716 struct bnx2x *bp = netdev_priv(dev);
11717
11718 bp->vlgrp = vlgrp;
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080011719
11720 /* Set flags according to the required capabilities */
11721 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11722
11723 if (dev->features & NETIF_F_HW_VLAN_TX)
11724 bp->flags |= HW_VLAN_TX_FLAG;
11725
11726 if (dev->features & NETIF_F_HW_VLAN_RX)
11727 bp->flags |= HW_VLAN_RX_FLAG;
11728
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011729 if (netif_running(dev))
Eliezer Tamir49d66772008-02-28 11:53:13 -080011730 bnx2x_set_client_config(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011731}
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011732
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011733#endif
11734
11735#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11736static void poll_bnx2x(struct net_device *dev)
11737{
11738 struct bnx2x *bp = netdev_priv(dev);
11739
11740 disable_irq(bp->pdev->irq);
11741 bnx2x_interrupt(bp->pdev->irq, dev);
11742 enable_irq(bp->pdev->irq);
11743}
11744#endif
11745
/* Kernel net_device callback dispatch table for bnx2x devices;
 * installed on the netdev in bnx2x_init_dev().
 */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
11763
/*
 * One-time PCI/netdev bring-up for a bnx2x device: enable the PCI
 * device, claim and map BAR0 (registers) and BAR2 (doorbells), set the
 * DMA masks, and wire up the netdev/ethtool/mdio callbacks.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is unwound through the err_out_* labels.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 (registers) and BAR2 (doorbells) must both be memory BARs */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* Only the first function to enable the device claims the regions
	   and configures bus mastering */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* Prefer 64-bit DMA; remember it in USING_DAC_FLAG so the netdev
	   HIGHDMA feature can be set below.  Fall back to 32-bit DMA. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	/* Map BAR0: the device register window */
	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Map BAR2: the doorbell window (capped at BNX2X_DB_SIZE) */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	/* Advertise the offload features this driver supports */
	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	/* Same offloads also apply to VLAN-tagged traffic */
	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
11927
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011928static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11929 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -080011930{
11931 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11932
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011933 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11934
11935 /* return value of 1=2.5GHz 2=5GHz */
11936 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -080011937}
11938
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011939static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11940{
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011941 const struct firmware *firmware = bp->firmware;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011942 struct bnx2x_fw_file_hdr *fw_hdr;
11943 struct bnx2x_fw_file_section *sections;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011944 u32 offset, len, num_ops;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011945 u16 *ops_offsets;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011946 int i;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011947 const u8 *fw_ver;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011948
11949 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11950 return -EINVAL;
11951
11952 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11953 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11954
11955 /* Make sure none of the offsets and sizes make us read beyond
11956 * the end of the firmware data */
11957 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11958 offset = be32_to_cpu(sections[i].offset);
11959 len = be32_to_cpu(sections[i].len);
11960 if (offset + len > firmware->size) {
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011961 printk(KERN_ERR PFX "Section %d length is out of "
11962 "bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011963 return -EINVAL;
11964 }
11965 }
11966
11967 /* Likewise for the init_ops offsets */
11968 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11969 ops_offsets = (u16 *)(firmware->data + offset);
11970 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11971
11972 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11973 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011974 printk(KERN_ERR PFX "Section offset %d is out of "
11975 "bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011976 return -EINVAL;
11977 }
11978 }
11979
11980 /* Check FW version */
11981 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11982 fw_ver = firmware->data + offset;
11983 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11984 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11985 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11986 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11987 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11988 " Should be %d.%d.%d.%d\n",
11989 fw_ver[0], fw_ver[1], fw_ver[2],
11990 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11991 BCM_5710_FW_MINOR_VERSION,
11992 BCM_5710_FW_REVISION_VERSION,
11993 BCM_5710_FW_ENGINEERING_VERSION);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000011994 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011995 }
11996
11997 return 0;
11998}
11999
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012000static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012001{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012002 const __be32 *source = (const __be32 *)_source;
12003 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012004 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012005
12006 for (i = 0; i < n/4; i++)
12007 target[i] = be32_to_cpu(source[i]);
12008}
12009
12010/*
12011 Ops array is stored in the following format:
12012 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12013 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012014static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012015{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012016 const __be32 *source = (const __be32 *)_source;
12017 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012018 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012019
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012020 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012021 tmp = be32_to_cpu(source[j]);
12022 target[i].op = (tmp >> 24) & 0xff;
12023 target[i].offset = tmp & 0xffffff;
12024 target[i].raw_data = be32_to_cpu(source[j+1]);
12025 }
12026}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012027
12028static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012029{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012030 const __be16 *source = (const __be16 *)_source;
12031 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012032 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012033
12034 for (i = 0; i < n/2; i++)
12035 target[i] = be16_to_cpu(source[i]);
12036}
12037
/* Allocate bp->arr and fill it from the firmware section "arr": len and
 * offset come from the firmware file header fw_hdr (validated earlier in
 * bnx2x_check_firmware()), and "func" converts the big-endian file data
 * into host order.  Jumps to label "lbl" on allocation failure.
 * Expands in a scope where bp and fw_hdr are in scope (bnx2x_init_firmware).
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes " \
			       "for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)
12050
/*
 * Request the chip-specific firmware file, validate it, and build the
 * host-order init arrays (blob, opcodes, offsets) plus direct pointers
 * into the firmware image for the per-STORM microcode sections.
 *
 * On failure, frees whatever was already allocated (note the fall-through
 * order of the unwind labels matches the allocation order) and releases
 * the firmware; returns 0 or a negative errno.
 */
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	/* Firmware file depends on the chip family (E1 vs E1H) */
	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else
		fw_file_name = FW_FILE_NAME_E1H;

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware: these point directly into the firmware image,
	   which stays held (bp->firmware) until bnx2x_remove_one() */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
12119
12120
/*
 * PCI probe entry point: allocate the multi-queue netdev, run the PCI
 * bring-up (bnx2x_init_dev), initialize driver state (bnx2x_init_bp),
 * load and validate the firmware, then register the netdev and print a
 * one-line summary of the adapter.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		/* bnx2x_init_dev() unwound its own resources already */
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	/* Announce the board, chip rev and negotiated PCIe link */
	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* Release regions/device only for the last enable reference */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
12191
/*
 * PCI removal callback: tear down everything the probe path set up --
 * netdev registration, firmware init arrays, BAR mappings and PCI state.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	/* Stop the stack from using the device before freeing anything */
	unregister_netdev(dev);

	/* Free the firmware init arrays loaded by bnx2x_init_firmware() */
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* Release the BAR regions only when we are the last enabler */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
12224
/*
 * PM suspend callback: save PCI config space and, if the interface is
 * up, detach it, unload the NIC and drop to the requested power state.
 * Returns 0 on success, -ENODEV if no netdev is attached to @pdev.
 */
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	/* rtnl serializes us against open/close and other PM callbacks */
	rtnl_lock();

	pci_save_state(pdev);

	/* Nothing more to do if the interface is down */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
12255
/*
 * PM resume callback: restore PCI config space and, if the interface
 * was up, power the chip back to D0, reattach and reload the NIC.
 * Returns 0, -ENODEV if no netdev is attached, or bnx2x_nic_load()'s rc.
 */
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	/* rtnl serializes us against open/close and other PM callbacks */
	rtnl_lock();

	pci_restore_state(pdev);

	/* Interface was down at suspend time - nothing to reload */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
12286
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012287static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12288{
12289 int i;
12290
12291 bp->state = BNX2X_STATE_ERROR;
12292
12293 bp->rx_mode = BNX2X_RX_MODE_NONE;
12294
12295 bnx2x_netif_stop(bp, 0);
12296
12297 del_timer_sync(&bp->timer);
12298 bp->stats_state = STATS_STATE_DISABLED;
12299 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12300
12301 /* Release IRQs */
12302 bnx2x_free_irq(bp);
12303
12304 if (CHIP_IS_E1(bp)) {
12305 struct mac_configuration_cmd *config =
12306 bnx2x_sp(bp, mcast_config);
12307
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -080012308 for (i = 0; i < config->hdr.length; i++)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012309 CAM_INVALIDATE(config->config_table[i]);
12310 }
12311
12312 /* Free SKBs, SGEs, TPA pool and driver internals */
12313 bnx2x_free_skbs(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000012314 for_each_queue(bp, i)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012315 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000012316 for_each_queue(bp, i)
Eilon Greenstein7cde1c82009-01-22 06:01:25 +000012317 netif_napi_del(&bnx2x_fp(bp, i, napi));
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012318 bnx2x_free_mem(bp);
12319
12320 bp->state = BNX2X_STATE_CLOSED;
12321
12322 netif_carrier_off(bp->dev);
12323
12324 return 0;
12325}
12326
/*
 * Re-establish driver<->MCP bookkeeping after an EEH reset: re-read the
 * shared memory base, validate the MCP signature and resync the firmware
 * mailbox sequence number.  Sets NO_MCP_FLAG if no management CPU is seen.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* A shmem base of 0 or outside [0xA0000, 0xC0000) means the
	 * management CPU (MCP) is not running - fall back to no-MCP mode.
	 */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	/* Check the validity signature; a mismatch is logged but not fatal */
	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
12356
Wendy Xiong493adb12008-06-23 20:36:22 -070012357/**
12358 * bnx2x_io_error_detected - called when PCI error is detected
12359 * @pdev: Pointer to PCI device
12360 * @state: The current pci connection state
12361 *
12362 * This function is called after a PCI bus error affecting
12363 * this device has been detected.
12364 */
12365static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12366 pci_channel_state_t state)
12367{
12368 struct net_device *dev = pci_get_drvdata(pdev);
12369 struct bnx2x *bp = netdev_priv(dev);
12370
12371 rtnl_lock();
12372
12373 netif_device_detach(dev);
12374
Dean Nelson07ce50e2009-07-31 09:13:25 +000012375 if (state == pci_channel_io_perm_failure) {
12376 rtnl_unlock();
12377 return PCI_ERS_RESULT_DISCONNECT;
12378 }
12379
Wendy Xiong493adb12008-06-23 20:36:22 -070012380 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012381 bnx2x_eeh_nic_unload(bp);
Wendy Xiong493adb12008-06-23 20:36:22 -070012382
12383 pci_disable_device(pdev);
12384
12385 rtnl_unlock();
12386
12387 /* Request a slot reset */
12388 return PCI_ERS_RESULT_NEED_RESET;
12389}
12390
12391/**
12392 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12393 * @pdev: Pointer to PCI device
12394 *
12395 * Restart the card from scratch, as if from a cold-boot.
12396 */
12397static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12398{
12399 struct net_device *dev = pci_get_drvdata(pdev);
12400 struct bnx2x *bp = netdev_priv(dev);
12401
12402 rtnl_lock();
12403
12404 if (pci_enable_device(pdev)) {
12405 dev_err(&pdev->dev,
12406 "Cannot re-enable PCI device after reset\n");
12407 rtnl_unlock();
12408 return PCI_ERS_RESULT_DISCONNECT;
12409 }
12410
12411 pci_set_master(pdev);
12412 pci_restore_state(pdev);
12413
12414 if (netif_running(dev))
12415 bnx2x_set_power_state(bp, PCI_D0);
12416
12417 rtnl_unlock();
12418
12419 return PCI_ERS_RESULT_RECOVERED;
12420}
12421
12422/**
12423 * bnx2x_io_resume - called when traffic can start flowing again
12424 * @pdev: Pointer to PCI device
12425 *
12426 * This callback is called when the error recovery driver tells us that
12427 * its OK to resume normal operation.
12428 */
12429static void bnx2x_io_resume(struct pci_dev *pdev)
12430{
12431 struct net_device *dev = pci_get_drvdata(pdev);
12432 struct bnx2x *bp = netdev_priv(dev);
12433
12434 rtnl_lock();
12435
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012436 bnx2x_eeh_recover(bp);
12437
Wendy Xiong493adb12008-06-23 20:36:22 -070012438 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012439 bnx2x_nic_load(bp, LOAD_NORMAL);
Wendy Xiong493adb12008-06-23 20:36:22 -070012440
12441 netif_device_attach(dev);
12442
12443 rtnl_unlock();
12444}
12445
/* PCI error recovery (EEH/AER) callbacks for this driver */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
12451
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012452static struct pci_driver bnx2x_pci_driver = {
Wendy Xiong493adb12008-06-23 20:36:22 -070012453 .name = DRV_MODULE_NAME,
12454 .id_table = bnx2x_pci_tbl,
12455 .probe = bnx2x_init_one,
12456 .remove = __devexit_p(bnx2x_remove_one),
12457 .suspend = bnx2x_suspend,
12458 .resume = bnx2x_resume,
12459 .err_handler = &bnx2x_err_handler,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012460};
12461
12462static int __init bnx2x_init(void)
12463{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000012464 int ret;
12465
Eilon Greenstein938cf542009-08-12 08:23:37 +000012466 printk(KERN_INFO "%s", version);
12467
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080012468 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12469 if (bnx2x_wq == NULL) {
12470 printk(KERN_ERR PFX "Cannot create workqueue\n");
12471 return -ENOMEM;
12472 }
12473
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000012474 ret = pci_register_driver(&bnx2x_pci_driver);
12475 if (ret) {
12476 printk(KERN_ERR PFX "Cannot register driver\n");
12477 destroy_workqueue(bnx2x_wq);
12478 }
12479 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012480}
12481
/* Module exit: unregister the PCI driver, then drop the workqueue */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
12488
/* Module entry and exit points */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
12491
Michael Chan993ac7b2009-10-10 13:46:56 +000012492#ifdef BCM_CNIC
12493
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	/* @count completions free that many slow-path slots */
	bp->cnic_spq_pending -= count;

	/* Drain queued CNIC kwqes onto the SPQ while there is room */
	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		/* Copy the next buffered kwqe into a fresh SPQ entry */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* Advance the consumer, wrapping at the end of the ring */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
12529
/*
 * CNIC entry point for submitting 16-byte kwqes: buffer them on the
 * local kwq ring under spq_lock and kick the slow path if there is
 * room.  Returns the number of kwqes accepted (may be < @count if the
 * ring fills up), or -EIO when the driver has panicked.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* Stop early when the local kwq ring is full */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		/* Advance the producer, wrapping at the end of the ring */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* Push buffered kwqes to the SPQ if it has free slots */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
12572
/*
 * Forward a control command to CNIC.  cnic_mutex protects cnic_ops
 * against concurrent (un)registration; since a mutex may sleep, this
 * variant is for process context (see the _bh variant below otherwise).
 * Returns 0 when no CNIC is registered, else the cnic_ctl() result.
 */
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}
12586
/*
 * Like bnx2x_cnic_ctl_send() but uses an RCU read lock instead of the
 * mutex, so it is safe from bottom-half (non-sleeping) context.
 * Returns 0 when no CNIC is registered, else the cnic_ctl() result.
 */
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}
12600
12601/*
12602 * for commands that have no data
12603 */
12604static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12605{
12606 struct cnic_ctl_info ctl = {0};
12607
12608 ctl.cmd = cmd;
12609
12610 return bnx2x_cnic_ctl_send(bp, &ctl);
12611}
12612
12613static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12614{
12615 struct cnic_ctl_info ctl;
12616
12617 /* first we tell CNIC and only then we count this as a completion */
12618 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12619 ctl.data.comp.cid = cid;
12620
12621 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12622 bnx2x_cnic_sp_post(bp, 1);
12623}
12624
/*
 * CNIC -> driver control dispatcher: ILT context-table writes,
 * completion accounting, and starting/stopping an L2 client's RX mode.
 * Returns 0 on success or -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		/* Write one context-table (ILT) entry on CNIC's behalf */
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		/* Credit back completed slow-path entries */
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Enable RX for this client and refresh the RX mode */
		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Disable RX for this client and refresh the RX mode */
		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
12671
/*
 * Describe the driver's interrupt configuration to CNIC: the IRQ
 * vector (when MSI-X is active), the CNIC and default status blocks,
 * and the number of IRQ entries being handed over.
 */
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		/* CNIC is given the second entry of the MSI-X table */
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
12691
/*
 * CNIC registration entry point: allocate the kwqe staging ring, set
 * up the CNIC status block and IRQ info, then publish @ops via RCU.
 * Returns 0, -EINVAL for a NULL @ops, -EBUSY while interrupts are
 * held off (intr_sem raised), or -ENOMEM on allocation failure.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* NOTE(review): intr_sem != 0 appears to mean interrupts are
	 * currently masked (device not fully up) - refuse registration.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* Empty ring: producer == consumer, last marks the wrap point */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* Publish last so RCU readers only ever see fully set-up state */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
12729
/*
 * CNIC unregistration: clear the iSCSI MAC if it was set, unpublish
 * cnic_ops under the mutex, wait out in-flight RCU readers, then free
 * the kwqe staging ring.  Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* Ensure no bottom-half reader still holds the old cnic_ops
	 * before tearing down the resources it might use.
	 */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
12749
/*
 * Fill in and return the cnic_eth_dev descriptor through which the
 * CNIC module attaches to this device: chip/PCI identity, mapped BARs,
 * context-table geometry, limits and the driver callbacks above.
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	/* Hand CNIC the already-mapped register and doorbell BARs */
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	/* Driver-side callbacks CNIC will invoke */
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
12773
12774#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012775