blob: 2b6717748eb83a9c4d26ec08e3e5ac59649683c8 [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Vladislav Zolotarov3359fce2010-02-17 13:35:01 -08003 * Copyright (c) 2007-2010 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080041#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070045#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020046#include <linux/workqueue.h>
47#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070048#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020049#include <linux/prefetch.h>
50#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020051#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000052#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020053
Eilon Greenstein359d8b12009-02-12 08:38:25 +000054
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020055#include "bnx2x.h"
56#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070057#include "bnx2x_init_ops.h"
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000058#include "bnx2x_dump.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020059
Dmitry Kravkov4fd89b7a2010-04-01 19:45:34 -070060#define DRV_MODULE_VERSION "1.52.1-8"
61#define DRV_MODULE_RELDATE "2010/04/01"
Eilon Greenstein34f80b02008-06-23 20:33:01 -070062#define BNX2X_BC_VER 0x040200
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020063
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070064#include <linux/firmware.h>
65#include "bnx2x_fw_file_hdr.h"
66/* FW files */
Ben Hutchings45229b42009-11-07 11:53:39 +000067#define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070074
Eilon Greenstein34f80b02008-06-23 20:33:01 -070075/* Time in jiffies before concluding the transmitter is hung */
76#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020077
/* one-line banner printed at module load */
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

/* Module parameters.  All use permission 0, so they are load-time only
 * and not exposed/writable via sysfs. */

/* multi-queue operation, enabled by default */
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

/* 0 means "derive from the number of CPUs" (see parameter description) */
static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

/* 0 = driver default; 1 forces legacy INT#x, 2 forces MSI */
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

/* debug aid: poll instead of relying on interrupts */
static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

/* -1 leaves the PCIe Max Read Request Size at its default */
static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

/* HW init reference counts shared between the two ports of a device */
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

/* driver-private workqueue (work items queued elsewhere in this file) */
static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200126
/* Board types; the values double as indices into board_info[] below and
 * as the driver_data field of the PCI device table entries. */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};
132
/* human-readable board names; indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
141
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700142
/* PCI IDs claimed by this driver; the trailing field (driver_data)
 * carries the bnx2x_board_type for the matched device */
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
151
152/****************************************************************************
153* General service functions
154****************************************************************************/
155
/* used only at init
 * locking is done by mcp
 */
/* Indirect register write: program the GRC address window in PCI config
 * space, write the value through the data window, then re-point the
 * window at PCICFG_VENDOR_ID_OFFSET.  The three config writes must stay
 * in exactly this order. */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
166
/* Indirect register read: counterpart of bnx2x_reg_wr_ind().  Programs
 * the GRC address window, reads the value back through the data window,
 * and re-points the window at PCICFG_VENDOR_ID_OFFSET before returning. */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200178
/* "GO" doorbell register for each of the 16 DMAE command slots; indexed
 * by the command slot number (see bnx2x_post_dmae()) */
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
185
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	/* slot @idx within the device's DMAE command memory */
	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	/* copy the command dword by dword */
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	/* ring the GO doorbell last, after the full command is in place */
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
202
/* Copy @len32 dwords from the host buffer at DMA address @dma_addr to
 * GRC address @dst_addr using the DMAE engine, then busy/sleep-wait for
 * the completion value to appear in the slowpath wb_comp word.
 *
 * Falls back to indirect register writes while the engine is not yet
 * initialised (!bp->dmae_ready).  Takes bp->dmae_mutex and may msleep,
 * so it must be called from process context.  A timeout (cnt exhausted)
 * is logged but not returned to the caller. */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;	/* max completion-poll iterations */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* PCI (host) -> GRC transfer; completion written back to PCI */
	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;	/* GRC address in dwords */
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* serialize all DMAE users; clear completion before posting */
	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* poll for the engine to write DMAE_COMP_VAL into wb_comp */
	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
276
/* Copy @len32 dwords from GRC address @src_addr into the slowpath
 * wb_data buffer using the DMAE engine (GRC -> host PCI direction),
 * then wait for completion just like bnx2x_write_dmae().
 *
 * Falls back to per-dword indirect reads while !bp->dmae_ready.  Takes
 * bp->dmae_mutex and may msleep - process context only.  A timeout is
 * logged but not propagated; callers read the result from wb_data. */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;	/* max completion-poll iterations */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* GRC -> PCI (host) transfer; completion written back to PCI */
	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;	/* GRC address in dwords */
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	/* clear destination buffer and completion word before posting */
	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* poll for the engine to write DMAE_COMP_VAL into wb_comp */
	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200351
Eilon Greenstein573f2032009-08-12 08:24:14 +0000352void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
353 u32 addr, u32 len)
354{
355 int offset = 0;
356
357 while (len > DMAE_LEN32_WR_MAX) {
358 bnx2x_write_dmae(bp, phys_addr + offset,
359 addr + offset, DMAE_LEN32_WR_MAX);
360 offset += DMAE_LEN32_WR_MAX * 4;
361 len -= DMAE_LEN32_WR_MAX;
362 }
363
364 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
365}
366
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700367/* used only for slowpath so not inlined */
368static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
369{
370 u32 wb_write[2];
371
372 wb_write[0] = val_hi;
373 wb_write[1] = val_lo;
374 REG_WR_DMAE(bp, reg, wb_write, 2);
375}
376
#ifdef USE_WB_RD
/* Read a 64-bit value from write-back register @reg via DMAE;
 * the high dword is returned first (combined with HILO_U64). */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb[2];

	REG_RD_DMAE(bp, reg, wb, 2);

	return HILO_U64(wb[0], wb[1]);
}
#endif
387
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200388static int bnx2x_mc_assert(struct bnx2x *bp)
389{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200390 char last_idx;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700391 int i, rc = 0;
392 u32 row0, row1, row2, row3;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200393
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700394 /* XSTORM */
395 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
396 XSTORM_ASSERT_LIST_INDEX_OFFSET);
397 if (last_idx)
398 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200399
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700400 /* print the asserts */
401 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200402
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700403 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
404 XSTORM_ASSERT_LIST_OFFSET(i));
405 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
407 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
409 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200411
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700412 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
413 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
414 " 0x%08x 0x%08x 0x%08x\n",
415 i, row3, row2, row1, row0);
416 rc++;
417 } else {
418 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200419 }
420 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700421
422 /* TSTORM */
423 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
424 TSTORM_ASSERT_LIST_INDEX_OFFSET);
425 if (last_idx)
426 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
427
428 /* print the asserts */
429 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
430
431 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
432 TSTORM_ASSERT_LIST_OFFSET(i));
433 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
435 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
437 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
439
440 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
441 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
442 " 0x%08x 0x%08x 0x%08x\n",
443 i, row3, row2, row1, row0);
444 rc++;
445 } else {
446 break;
447 }
448 }
449
450 /* CSTORM */
451 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
452 CSTORM_ASSERT_LIST_INDEX_OFFSET);
453 if (last_idx)
454 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
455
456 /* print the asserts */
457 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
458
459 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
460 CSTORM_ASSERT_LIST_OFFSET(i));
461 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
463 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
465 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
467
468 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
469 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
470 " 0x%08x 0x%08x 0x%08x\n",
471 i, row3, row2, row1, row0);
472 rc++;
473 } else {
474 break;
475 }
476 }
477
478 /* USTORM */
479 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
480 USTORM_ASSERT_LIST_INDEX_OFFSET);
481 if (last_idx)
482 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
483
484 /* print the asserts */
485 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
486
487 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
488 USTORM_ASSERT_LIST_OFFSET(i));
489 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i) + 4);
491 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
492 USTORM_ASSERT_LIST_OFFSET(i) + 8);
493 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
494 USTORM_ASSERT_LIST_OFFSET(i) + 12);
495
496 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
497 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
498 " 0x%08x 0x%08x 0x%08x\n",
499 i, row3, row2, row1, row0);
500 rc++;
501 } else {
502 break;
503 }
504 }
505
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200506 return rc;
507}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800508
/* Dump the MCP firmware trace from the device scratchpad to the kernel
 * log.  @mark (read from scratchpad offset 0xf104) looks like the
 * current write position of a circular buffer: the region from @mark to
 * the end is printed first, then the region from the start up to @mark
 * -- NOTE(review): buffer layout/offsets (0xf104, 0xF108, 0xF900,
 * 0x08000000 bias) are firmware-defined; confirm against MCP docs. */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);	/* round up to dword boundary */
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	/* start an err-level line; 32-byte chunks are appended below */
	pr_err("");
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;	/* NUL-terminate for the %s print */
		pr_cont("%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
536
/* Emergency state dump: logs driver/HW indices, a window of each Rx and
 * Tx ring around the current consumer, the firmware trace and any STORM
 * asserts.  Called on fatal errors; disables statistics first so the
 * stats state machine stops touching the hardware. */
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* window around the consumer: 10 before, ~503 after */
		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
648
/* Enable host-coalescing interrupts for this port according to the
 * active interrupt mode (MSI-X / MSI / INT#x), then program the
 * leading/trailing attention edge registers on E1H chips. */
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		/* MSI-X: single-ISR and INT-line modes off */
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		/* INT#x: deliberately write the config twice - first with
		 * the MSI/MSI-X enable bit set, then (via the common write
		 * below) with it cleared.  NOTE(review): looks like a
		 * required HW enable sequence - confirm before changing. */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			/* MF mode: select this VN's attention bit */
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
708
/*
 * Mask all HC interrupt sources for this port: clears the single-ISR,
 * MSI/MSI-X, INTx-line and attention-bit enables in HC_CONFIG, then
 * reads the register back to verify the write actually landed.
 */
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	/* read-back check: the disable write must have reached the IGU */
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
730
/*
 * Quiesce all interrupt handling: raise intr_sem so ISRs bail out,
 * optionally mask interrupts in hardware, wait for every in-flight
 * ISR to finish, and make sure the slowpath task is not running.
 *
 * @disable_hw: when set, also program the HC to stop generating
 *              interrupts (bnx2x_int_disable)
 */
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is the slowpath vector; queue vectors follow,
		 * shifted by one more when the CNIC vector is present */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
760
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700761/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200762
763/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700764 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200765 */
766
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +0000767/* Return true if succeeded to acquire the lock */
768static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
769{
770 u32 lock_status;
771 u32 resource_bit = (1 << resource);
772 int func = BP_FUNC(bp);
773 u32 hw_lock_control_reg;
774
775 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
776
777 /* Validating that the resource is within range */
778 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
779 DP(NETIF_MSG_HW,
780 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
781 resource, HW_LOCK_MAX_RESOURCE_VALUE);
782 return -EINVAL;
783 }
784
785 if (func <= 5)
786 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
787 else
788 hw_lock_control_reg =
789 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
790
791 /* Try to acquire the lock */
792 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
793 lock_status = REG_RD(bp, hw_lock_control_reg);
794 if (lock_status & resource_bit)
795 return true;
796
797 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
798 return false;
799}
800
/*
 * Acknowledge a status block to the IGU via the port's HC command
 * register: reports the last-seen index for (sb_id, storm) and may
 * re-enable interrupts depending on @op/@update.
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	/* per-port command register block, 32 bytes apart */
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	/* the whole struct is written to the chip as one 32-bit word */
	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
823
/*
 * Refresh the fastpath's cached CSTORM/USTORM status block indices
 * from the DMA-written status block.  The barrier() forces the
 * compiler to re-read the fields the chip updates.
 */
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}
832
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200833static u16 bnx2x_ack_int(struct bnx2x *bp)
834{
Eilon Greenstein5c862842008-08-13 15:51:48 -0700835 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
836 COMMAND_REG_SIMD_MASK);
837 u32 result = REG_RD(bp, hc_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200838
Eilon Greenstein5c862842008-08-13 15:51:48 -0700839 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
840 result, hc_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200841
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200842 return result;
843}
844
845
846/*
847 * fast path service functions
848 */
849
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -0800850static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
851{
852 /* Tell compiler that consumer and producer can change */
853 barrier();
854 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
Eilon Greenstein237907c2009-01-14 06:42:44 +0000855}
856
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200857/* free skb in the packet ring at pos idx
858 * return idx of last bd freed
859 */
860static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
861 u16 idx)
862{
863 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
Eilon Greensteinca003922009-08-12 22:53:28 -0700864 struct eth_tx_start_bd *tx_start_bd;
865 struct eth_tx_bd *tx_data_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200866 struct sk_buff *skb = tx_buf->skb;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700867 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200868 int nbd;
869
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000870 /* prefetch skb end pointer to speedup dev_kfree_skb() */
871 prefetch(&skb->end);
872
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200873 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
874 idx, tx_buf, skb);
875
876 /* unmap first bd */
877 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
Eilon Greensteinca003922009-08-12 22:53:28 -0700878 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
FUJITA Tomonori1a983142010-04-04 01:51:03 +0000879 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
Eilon Greensteinca003922009-08-12 22:53:28 -0700880 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200881
Eilon Greensteinca003922009-08-12 22:53:28 -0700882 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200883#ifdef BNX2X_STOP_ON_ERROR
Eilon Greensteinca003922009-08-12 22:53:28 -0700884 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700885 BNX2X_ERR("BAD nbd!\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200886 bnx2x_panic();
887 }
888#endif
Eilon Greensteinca003922009-08-12 22:53:28 -0700889 new_cons = nbd + tx_buf->first_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200890
Eilon Greensteinca003922009-08-12 22:53:28 -0700891 /* Get the next bd */
892 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
893
894 /* Skip a parse bd... */
895 --nbd;
896 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
897
898 /* ...and the TSO split header bd since they have no mapping */
899 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
900 --nbd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200901 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200902 }
903
904 /* now free frags */
905 while (nbd > 0) {
906
907 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
Eilon Greensteinca003922009-08-12 22:53:28 -0700908 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
FUJITA Tomonori1a983142010-04-04 01:51:03 +0000909 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
910 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200911 if (--nbd)
912 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
913 }
914
915 /* release skb */
Ilpo Järvinen53e5e962008-07-25 21:40:45 -0700916 WARN_ON(!skb);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000917 dev_kfree_skb(skb);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200918 tx_buf->first_bd = 0;
919 tx_buf->skb = NULL;
920
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700921 return new_cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200922}
923
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700924static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200925{
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700926 s16 used;
927 u16 prod;
928 u16 cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200929
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200930 prod = fp->tx_bd_prod;
931 cons = fp->tx_bd_cons;
932
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700933 /* NUM_TX_RINGS = number of "next-page" entries
934 It will be used as a threshold */
935 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200936
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700937#ifdef BNX2X_STOP_ON_ERROR
Ilpo Järvinen53e5e962008-07-25 21:40:45 -0700938 WARN_ON(used < 0);
939 WARN_ON(used > fp->bp->tx_ring_size);
940 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700941#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200942
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700943 return (s16)(fp->bp->tx_ring_size) - used;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200944}
945
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000946static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
947{
948 u16 hw_cons;
949
950 /* Tell compiler that status block fields can change */
951 barrier();
952 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
953 return hw_cons != fp->tx_pkt_cons;
954}
955
/*
 * Reap completed TX packets on one fastpath ring: walk from the
 * software consumer up to the hardware consumer freeing each packet,
 * publish the new consumer values, and re-wake the netdev TX queue if
 * it was stopped and enough descriptors are now free.
 *
 * Returns 0, or -1 when the driver has paniced (BNX2X_STOP_ON_ERROR).
 */
static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	/* free every packet the chip has completed since last time */
	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		/* re-check under the lock; wake only when the device is
		 * open and there is room for a worst-case packet */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
1024
Michael Chan993ac7b2009-10-10 13:46:56 +00001025#ifdef BCM_CNIC
1026static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1027#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -07001028
/*
 * Handle a slowpath (ramrod) completion CQE delivered on a fastpath
 * ring.  For non-leading queues (fp->index != 0) only per-queue
 * setup/halt transitions are expected; the leading queue additionally
 * drives the global bp->state machine (port setup, halt, CFC delete,
 * set-MAC completions).  Each completion returns one slowpath queue
 * credit (bp->spq_left).
 */
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* ramrod completed - return its slowpath queue credit */
	bp->spq_left++;

	if (fp->index) {
		/* non-leading queue: only per-fastpath state changes */
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		/* waiters poll set_mac_pending; publish the decrement */
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
1111
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001112static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1113 struct bnx2x_fastpath *fp, u16 index)
1114{
1115 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1116 struct page *page = sw_buf->page;
1117 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1118
1119 /* Skip "next page" elements */
1120 if (!page)
1121 return;
1122
FUJITA Tomonori1a983142010-04-04 01:51:03 +00001123 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001124 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001125 __free_pages(page, PAGES_PER_SGE_SHIFT);
1126
1127 sw_buf->page = NULL;
1128 sge->addr_hi = 0;
1129 sge->addr_lo = 0;
1130}
1131
/* Free the first @last entries of the fastpath's RX SGE ring. */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int idx = 0;

	while (idx < last)
		bnx2x_free_rx_sge(bp, fp, idx++);
}
1140
1141static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1142 struct bnx2x_fastpath *fp, u16 index)
1143{
1144 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1145 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1146 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1147 dma_addr_t mapping;
1148
1149 if (unlikely(page == NULL))
1150 return -ENOMEM;
1151
FUJITA Tomonori1a983142010-04-04 01:51:03 +00001152 mapping = dma_map_page(&bp->pdev->dev, page, 0,
1153 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001154 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001155 __free_pages(page, PAGES_PER_SGE_SHIFT);
1156 return -ENOMEM;
1157 }
1158
1159 sw_buf->page = page;
FUJITA Tomonori1a983142010-04-04 01:51:03 +00001160 dma_unmap_addr_set(sw_buf, mapping, mapping);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001161
1162 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1163 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1164
1165 return 0;
1166}
1167
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001168static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1169 struct bnx2x_fastpath *fp, u16 index)
1170{
1171 struct sk_buff *skb;
1172 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1173 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1174 dma_addr_t mapping;
1175
1176 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1177 if (unlikely(skb == NULL))
1178 return -ENOMEM;
1179
FUJITA Tomonori1a983142010-04-04 01:51:03 +00001180 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
1181 DMA_FROM_DEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001182 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001183 dev_kfree_skb(skb);
1184 return -ENOMEM;
1185 }
1186
1187 rx_buf->skb = skb;
FUJITA Tomonori1a983142010-04-04 01:51:03 +00001188 dma_unmap_addr_set(rx_buf, mapping, mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001189
1190 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1191 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1192
1193 return 0;
1194}
1195
1196/* note that we are not allocating a new skb,
1197 * we are just moving one from cons to prod
1198 * we are not creating a new mapping,
1199 * so there is no need to check for dma_mapping_error().
1200 */
1201static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1202 struct sk_buff *skb, u16 cons, u16 prod)
1203{
1204 struct bnx2x *bp = fp->bp;
1205 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1206 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1207 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1208 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1209
FUJITA Tomonori1a983142010-04-04 01:51:03 +00001210 dma_sync_single_for_device(&bp->pdev->dev,
1211 dma_unmap_addr(cons_rx_buf, mapping),
1212 RX_COPY_THRESH, DMA_FROM_DEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001213
1214 prod_rx_buf->skb = cons_rx_buf->skb;
FUJITA Tomonori1a983142010-04-04 01:51:03 +00001215 dma_unmap_addr_set(prod_rx_buf, mapping,
1216 dma_unmap_addr(cons_rx_buf, mapping));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001217 *prod_bd = *cons_bd;
1218}
1219
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001220static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1221 u16 idx)
1222{
1223 u16 last_max = fp->last_max_sge;
1224
1225 if (SUB_S16(idx, last_max) > 0)
1226 fp->last_max_sge = idx;
1227}
1228
1229static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1230{
1231 int i, j;
1232
1233 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1234 int idx = RX_SGE_CNT * i - 1;
1235
1236 for (j = 0; j < 2; j++) {
1237 SGE_MASK_CLEAR_BIT(fp, idx);
1238 idx--;
1239 }
1240 }
1241}
1242
/*
 * Advance the RX SGE producer after a TPA completion: clear the mask
 * bits for every page the firmware consumed (indices come from the
 * CQE's SGL), then walk fully-consumed mask elements from the current
 * producer and move rx_sge_prod past them.
 */
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	/* number of SGE pages used = bytes beyond the first BD,
	 * rounded up to whole SGE pages */
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		/* stop at the first element that still has unused bits */
		if (likely(fp->sge_mask[i]))
			break;

		/* element fully consumed - refill its mask and count it */
		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
1295
1296static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1297{
1298 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1299 memset(fp->sge_mask, 0xff,
1300 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1301
Eilon Greenstein33471622008-08-13 15:59:08 -07001302 /* Clear the two last indices in the page to 1:
1303 these are the indices that correspond to the "next" element,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001304 hence will never be indicated and should be removed from
1305 the calculations. */
1306 bnx2x_clear_sge_mask_next_elems(fp);
1307}
1308
/*
 * Begin TPA (LRO) aggregation on @queue: swap the empty skb held in
 * the TPA pool into the producer ring slot (mapping it for the
 * device), and park the partially-received skb from the consumer slot
 * in the pool - it stays mapped until bnx2x_tpa_stop() finishes the
 * aggregation.
 */
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	/* debug bookkeeping: which TPA bins are in use */
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
1347
/*
 * Attach the SGE pages of a completed TPA aggregation to @skb as page
 * fragments.  Each consumed SGE is replaced with a freshly allocated
 * page; on allocation failure the walk stops and the whole packet is
 * dropped by the caller.  Returns 0 on success or a negative errno.
 */
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	/* bytes carried by the SGEs = total packet minus the first BD */
	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					   max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
1414
/*
 * bnx2x_tpa_stop - complete a TPA aggregation and hand the skb to the stack.
 * @bp:		driver handle
 * @fp:		fastpath the aggregation ended on
 * @queue:	TPA pool slot (aggregation id) being terminated
 * @pad:	placement offset of the payload inside the buffer
 * @len:	length of the linear (len_on_bd) part of the packet
 * @cqe:	the TPA_END completion entry
 * @cqe_idx:	index of @cqe in the completion ring
 *
 * Unmaps the aggregated skb, recomputes the IP header checksum over the
 * merged packet, attaches the SGE pages as page fragments via
 * bnx2x_fill_frag_skb() and passes the skb to GRO (VLAN-accelerated when
 * applicable).  The pool slot is refilled with a freshly allocated skb; if
 * that allocation fails the packet is dropped and the old buffer stays in
 * the pool.  In all cases the slot state returns to BNX2X_TPA_STOP.
 */
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		/* Was a VLAN tag parsed out of this packet? */
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		/* Tag present but HW VLAN stripping disabled: the tag is
		 * still inline in the packet data */
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		/* Debug build: catch a linear part that would overflow the
		 * underlying buffer before skb_put() trips over it */
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			/* Recompute the IP checksum for the merged packet */
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		/* Attach the aggregated SGE pages as skb fragments */
		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
1504
/*
 * bnx2x_update_rx_prod - publish new Rx producer values to the firmware.
 * @bp:		driver handle
 * @fp:		fastpath whose producers are being updated
 * @bd_prod:	new Rx BD ring producer
 * @rx_comp_prod: new Rx completion (CQE) ring producer
 * @rx_sge_prod: new Rx SGE ring producer
 *
 * Writes the three producer indices into the USTORM internal memory area
 * for this port/client, word by word.  The wmb() before the writes and the
 * mmiowb() after them enforce the ordering the firmware depends on.
 */
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	/* Copy the producer struct into USTORM memory one u32 at a time */
	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
1539
/*
 * bnx2x_rx_int - process up to @budget Rx completions on one fastpath.
 * @fp:		fastpath (queue) to service
 * @budget:	NAPI budget, maximum number of packets to deliver
 *
 * Walks the Rx completion ring between the software consumer and the
 * hardware consumer taken from the status block.  For each CQE:
 *  - slowpath CQEs are forwarded to bnx2x_sp_event();
 *  - TPA start/end CQEs are routed to bnx2x_tpa_start()/bnx2x_tpa_stop();
 *  - regular packets are either copied into a fresh small skb (when
 *    mtu > 1500 and len <= RX_COPY_THRESH) or unmapped and passed up
 *    directly after a replacement buffer is allocated, then delivered
 *    via (vlan_)gro_receive.
 * Finally the ring consumer/producer state is saved back to @fp and the
 * new producers are published to the firmware.
 *
 * Returns the number of packets processed (<= @budget).
 */
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
							  len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe,
						       comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			/* Sync the header area back to the device view
			 * before the CPU inspects it */
			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
						   DMA_FROM_DEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				/* recycle the original buffer to the ring */
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				/* replacement allocated - pass the original
				 * buffer up without copying */
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_gro_receive(&fp->napi, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
		else
#endif
			napi_gro_receive(&fp->napi, skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
1773
/*
 * bnx2x_msix_fp_int - MSI-X interrupt handler for one fastpath queue.
 * @irq:	interrupt number (unused)
 * @fp_cookie:	the struct bnx2x_fastpath registered for this vector
 *
 * Acks the status block with IGU_INT_DISABLE (masking further interrupts
 * from this SB) and schedules NAPI, which performs the actual Rx/Tx work.
 * The vector is per-queue, so the handler always returns IRQ_HANDLED.
 */
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->u_status_block.status_block_index);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
1803
/*
 * bnx2x_interrupt - shared (INTx/MSI) interrupt handler.
 * @irq:	 interrupt number (unused)
 * @dev_instance: the net_device this handler was registered with
 *
 * Reads the interrupt status via bnx2x_ack_int() and dispatches it:
 * each fastpath whose status bit (0x2 << sb_id) is set gets its NAPI
 * scheduled; under BCM_CNIC the CNIC status bit is forwarded to the
 * registered cnic handler; bit 0 denotes a slowpath event and is
 * deferred to the sp_task workqueue.  Returns IRQ_NONE only when the
 * status is 0 (shared interrupt not ours).
 */
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Schedule NAPI for every fastpath whose status bit is set */
	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	/* Forward CNIC events to the registered cnic driver (if any) */
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* Bit 0: slowpath event - handled in process context */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
1875
1876/* end of fast path */
1877
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001878static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001879
1880/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001881
1882/*
1883 * General service functions
1884 */
1885
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001886static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001887{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001888 u32 lock_status;
1889 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001890 int func = BP_FUNC(bp);
1891 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001892 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001893
1894 /* Validating that the resource is within range */
1895 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1896 DP(NETIF_MSG_HW,
1897 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1898 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1899 return -EINVAL;
1900 }
1901
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001902 if (func <= 5) {
1903 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1904 } else {
1905 hw_lock_control_reg =
1906 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1907 }
1908
Eliezer Tamirf1410642008-02-28 11:51:50 -08001909 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001910 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001911 if (lock_status & resource_bit) {
1912 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1913 lock_status, resource_bit);
1914 return -EEXIST;
1915 }
1916
Eilon Greenstein46230476b2008-08-25 15:23:30 -07001917 /* Try for 5 second every 5ms */
1918 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001919 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001920 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1921 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001922 if (lock_status & resource_bit)
1923 return 0;
1924
1925 msleep(5);
1926 }
1927 DP(NETIF_MSG_HW, "Timeout\n");
1928 return -EAGAIN;
1929}
1930
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001931static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001932{
1933 u32 lock_status;
1934 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001935 int func = BP_FUNC(bp);
1936 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001937
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001938 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1939
Eliezer Tamirf1410642008-02-28 11:51:50 -08001940 /* Validating that the resource is within range */
1941 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1942 DP(NETIF_MSG_HW,
1943 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1944 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1945 return -EINVAL;
1946 }
1947
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001948 if (func <= 5) {
1949 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1950 } else {
1951 hw_lock_control_reg =
1952 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1953 }
1954
Eliezer Tamirf1410642008-02-28 11:51:50 -08001955 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001956 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001957 if (!(lock_status & resource_bit)) {
1958 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1959 lock_status, resource_bit);
1960 return -EFAULT;
1961 }
1962
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001963 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001964 return 0;
1965}
1966
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001967/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	/* Serialize PHY access within this driver instance */
	mutex_lock(&bp->port.phy_mutex);

	/* On boards where the MDIO bus is shared, also take the
	 * chip-level MDIO hardware lock */
	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}
1975
static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	/* Release in reverse order of bnx2x_acquire_phy_lock():
	 * HW MDIO lock first (if taken), then the mutex */
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
1983
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001984int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1985{
1986 /* The GPIO should be swapped if swap register is set and active */
1987 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1988 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1989 int gpio_shift = gpio_num +
1990 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1991 u32 gpio_mask = (1 << gpio_shift);
1992 u32 gpio_reg;
1993 int value;
1994
1995 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1996 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1997 return -EINVAL;
1998 }
1999
2000 /* read GPIO value */
2001 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2002
2003 /* get the requested pin value */
2004 if ((gpio_reg & gpio_mask) == gpio_mask)
2005 value = 1;
2006 else
2007 value = 0;
2008
2009 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2010
2011 return value;
2012}
2013
/*
 * bnx2x_set_gpio - drive a GPIO pin low/high or float it.
 * @bp:		driver handle
 * @gpio_num:	GPIO index (0..MISC_REGISTERS_GPIO_3)
 * @mode:	MISC_REGISTERS_GPIO_OUTPUT_LOW / OUTPUT_HIGH / INPUT_HI_Z
 * @port:	port the pin logically belongs to
 *
 * Performs a read-modify-write of MISC_REG_GPIO under the GPIO HW lock,
 * honoring the port-swap straps.  Returns 0 on success or -EINVAL for an
 * out-of-range GPIO number; an unknown @mode leaves the register's float
 * bits unchanged.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* RMW of the GPIO register must be serialized across functions */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
2066
/*
 * bnx2x_set_gpio_int - configure the interrupt output of a GPIO pin.
 * @bp:		driver handle
 * @gpio_num:	GPIO index (0..MISC_REGISTERS_GPIO_3)
 * @mode:	MISC_REGISTERS_GPIO_INT_OUTPUT_CLR / OUTPUT_SET
 * @port:	port the pin logically belongs to
 *
 * Read-modify-writes MISC_REG_GPIO_INT under the GPIO HW lock, honoring
 * the port-swap straps.  Returns 0 on success or -EINVAL for an
 * out-of-range GPIO number; an unknown @mode writes the register back
 * unmodified.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* RMW of the GPIO INT register must be serialized across functions */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
2112
/*
 * bnx2x_set_spio - drive an SPIO pin low/high or float it.
 * @bp:		driver handle
 * @spio_num:	SPIO index (MISC_REGISTERS_SPIO_4..MISC_REGISTERS_SPIO_7)
 * @mode:	MISC_REGISTERS_SPIO_OUTPUT_LOW / OUTPUT_HIGH / INPUT_HI_Z
 *
 * Read-modify-writes MISC_REG_SPIO under the SPIO HW lock.  SPIO pins
 * are chip-global (no port swap is involved, unlike GPIO).  Returns 0 on
 * success or -EINVAL for an out-of-range SPIO number.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	/* RMW of the SPIO register must be serialized across functions */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
2158
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002159static void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002160{
Eilon Greensteinad33ea32009-01-14 21:24:57 -08002161 switch (bp->link_vars.ieee_fc &
2162 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002163 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002164 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002165 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002166 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002167
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002168 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002169 bp->port.advertising |= (ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002170 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002171 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002172
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002173 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002174 bp->port.advertising |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08002175 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002176
Eliezer Tamirf1410642008-02-28 11:51:50 -08002177 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002178 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002179 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002180 break;
2181 }
2182}
2183
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002184static void bnx2x_link_report(struct bnx2x *bp)
2185{
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002186 if (bp->flags & MF_FUNC_DIS) {
Eilon Greenstein2691d512009-08-12 08:22:08 +00002187 netif_carrier_off(bp->dev);
Joe Perches7995c642010-02-17 15:01:52 +00002188 netdev_err(bp->dev, "NIC Link is Down\n");
Eilon Greenstein2691d512009-08-12 08:22:08 +00002189 return;
2190 }
2191
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002192 if (bp->link_vars.link_up) {
Eilon Greenstein35c5f8f2009-10-15 00:19:05 -07002193 u16 line_speed;
2194
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002195 if (bp->state == BNX2X_STATE_OPEN)
2196 netif_carrier_on(bp->dev);
Joe Perches7995c642010-02-17 15:01:52 +00002197 netdev_info(bp->dev, "NIC Link is Up, ");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002198
Eilon Greenstein35c5f8f2009-10-15 00:19:05 -07002199 line_speed = bp->link_vars.line_speed;
2200 if (IS_E1HMF(bp)) {
2201 u16 vn_max_rate;
2202
2203 vn_max_rate =
2204 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2205 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2206 if (vn_max_rate < line_speed)
2207 line_speed = vn_max_rate;
2208 }
Joe Perches7995c642010-02-17 15:01:52 +00002209 pr_cont("%d Mbps ", line_speed);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002210
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002211 if (bp->link_vars.duplex == DUPLEX_FULL)
Joe Perches7995c642010-02-17 15:01:52 +00002212 pr_cont("full duplex");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002213 else
Joe Perches7995c642010-02-17 15:01:52 +00002214 pr_cont("half duplex");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002215
David S. Millerc0700f92008-12-16 23:53:20 -08002216 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2217 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
Joe Perches7995c642010-02-17 15:01:52 +00002218 pr_cont(", receive ");
Eilon Greenstein356e2382009-02-12 08:38:32 +00002219 if (bp->link_vars.flow_ctrl &
2220 BNX2X_FLOW_CTRL_TX)
Joe Perches7995c642010-02-17 15:01:52 +00002221 pr_cont("& transmit ");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002222 } else {
Joe Perches7995c642010-02-17 15:01:52 +00002223 pr_cont(", transmit ");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002224 }
Joe Perches7995c642010-02-17 15:01:52 +00002225 pr_cont("flow control ON");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002226 }
Joe Perches7995c642010-02-17 15:01:52 +00002227 pr_cont("\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002228
2229 } else { /* link_down */
2230 netif_carrier_off(bp->dev);
Joe Perches7995c642010-02-17 15:01:52 +00002231 netdev_err(bp->dev, "NIC Link is Down\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002232 }
2233}
2234
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00002235static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002236{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002237 if (!BP_NOMCP(bp)) {
2238 u8 rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002239
Eilon Greenstein19680c42008-08-13 15:47:33 -07002240 /* Initialize link parameters structure variables */
Yaniv Rosner8c99e7b2008-08-13 15:56:17 -07002241 /* It is recommended to turn off RX FC for jumbo frames
2242 for better performance */
Eilon Greenstein0c593272009-08-12 08:22:13 +00002243 if (bp->dev->mtu > 5000)
David S. Millerc0700f92008-12-16 23:53:20 -08002244 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
Yaniv Rosner8c99e7b2008-08-13 15:56:17 -07002245 else
David S. Millerc0700f92008-12-16 23:53:20 -08002246 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002247
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002248 bnx2x_acquire_phy_lock(bp);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00002249
2250 if (load_mode == LOAD_DIAG)
2251 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2252
Eilon Greenstein19680c42008-08-13 15:47:33 -07002253 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00002254
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002255 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002256
Eilon Greenstein3c96c682009-01-14 21:25:31 -08002257 bnx2x_calc_fc_adv(bp);
2258
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00002259 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2260 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002261 bnx2x_link_report(bp);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00002262 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002263
Eilon Greenstein19680c42008-08-13 15:47:33 -07002264 return rc;
2265 }
Eilon Greensteinf5372252009-02-12 08:38:30 +00002266 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
Eilon Greenstein19680c42008-08-13 15:47:33 -07002267 return -EINVAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002268}
2269
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002270static void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002271{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002272 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002273 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002274 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002275 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002276
Eilon Greenstein19680c42008-08-13 15:47:33 -07002277 bnx2x_calc_fc_adv(bp);
2278 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00002279 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002280}
2281
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002282static void bnx2x__link_reset(struct bnx2x *bp)
2283{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002284 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002285 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00002286 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002287 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002288 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00002289 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002290}
2291
2292static u8 bnx2x_link_test(struct bnx2x *bp)
2293{
2294 u8 rc;
2295
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002296 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002297 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002298 bnx2x_release_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002299
2300 return rc;
2301}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002302
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002303static void bnx2x_init_port_minmax(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002304{
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002305 u32 r_param = bp->link_vars.line_speed / 8;
2306 u32 fair_periodic_timeout_usec;
2307 u32 t_fair;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002308
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002309 memset(&(bp->cmng.rs_vars), 0,
2310 sizeof(struct rate_shaping_vars_per_port));
2311 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002312
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002313 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2314 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002315
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002316 /* this is the threshold below which no timer arming will occur
2317 1.25 coefficient is for the threshold to be a little bigger
2318 than the real time, to compensate for timer in-accuracy */
2319 bp->cmng.rs_vars.rs_threshold =
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002320 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2321
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002322 /* resolution of fairness timer */
2323 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2324 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2325 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002326
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002327 /* this is the threshold below which we won't arm the timer anymore */
2328 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002329
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002330 /* we multiply by 1e3/8 to get bytes/msec.
2331 We don't want the credits to pass a credit
2332 of the t_fair*FAIR_MEM (algorithm resolution) */
2333 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2334 /* since each tick is 4 usec */
2335 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002336}
2337
Eilon Greenstein2691d512009-08-12 08:22:08 +00002338/* Calculates the sum of vn_min_rates.
2339 It's needed for further normalizing of the min_rates.
2340 Returns:
2341 sum of vn_min_rates.
2342 or
2343 0 - if all the min_rates are 0.
2344 In the later case fainess algorithm should be deactivated.
2345 If not all min_rates are zero then those that are zeroes will be set to 1.
2346 */
2347static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2348{
2349 int all_zero = 1;
2350 int port = BP_PORT(bp);
2351 int vn;
2352
2353 bp->vn_weight_sum = 0;
2354 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2355 int func = 2*vn + port;
2356 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2357 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2358 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2359
2360 /* Skip hidden vns */
2361 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2362 continue;
2363
2364 /* If min rate is zero - set it to 1 */
2365 if (!vn_min_rate)
2366 vn_min_rate = DEF_MIN_RATE;
2367 else
2368 all_zero = 0;
2369
2370 bp->vn_weight_sum += vn_min_rate;
2371 }
2372
2373 /* ... only if all min rates are zeros - disable fairness */
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07002374 if (all_zero) {
2375 bp->cmng.flags.cmng_enables &=
2376 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2377 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2378 " fairness will be disabled\n");
2379 } else
2380 bp->cmng.flags.cmng_enables |=
2381 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
Eilon Greenstein2691d512009-08-12 08:22:08 +00002382}
2383
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002384static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002385{
2386 struct rate_shaping_vars_per_vn m_rs_vn;
2387 struct fairness_vars_per_vn m_fair_vn;
2388 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2389 u16 vn_min_rate, vn_max_rate;
2390 int i;
2391
2392 /* If function is hidden - set min and max to zeroes */
2393 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2394 vn_min_rate = 0;
2395 vn_max_rate = 0;
2396
2397 } else {
2398 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2399 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07002400 /* If min rate is zero - set it to 1 */
2401 if (!vn_min_rate)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002402 vn_min_rate = DEF_MIN_RATE;
2403 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2404 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2405 }
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002406 DP(NETIF_MSG_IFUP,
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07002407 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002408 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002409
2410 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2411 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2412
2413 /* global vn counter - maximal Mbps for this vn */
2414 m_rs_vn.vn_counter.rate = vn_max_rate;
2415
2416 /* quota - number of bytes transmitted in this period */
2417 m_rs_vn.vn_counter.quota =
2418 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2419
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002420 if (bp->vn_weight_sum) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002421 /* credit for each period of the fairness algorithm:
2422 number of bytes in T_FAIR (the vn share the port rate).
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002423 vn_weight_sum should not be larger than 10000, thus
2424 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2425 than zero */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002426 m_fair_vn.vn_credit_delta =
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002427 max((u32)(vn_min_rate * (T_FAIR_COEF /
2428 (8 * bp->vn_weight_sum))),
2429 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002430 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2431 m_fair_vn.vn_credit_delta);
2432 }
2433
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002434 /* Store it to internal memory */
2435 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2436 REG_WR(bp, BAR_XSTRORM_INTMEM +
2437 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2438 ((u32 *)(&m_rs_vn))[i]);
2439
2440 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2441 REG_WR(bp, BAR_XSTRORM_INTMEM +
2442 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2443 ((u32 *)(&m_fair_vn))[i]);
2444}
2445
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002446
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002447/* This function is called upon link interrupt */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002448static void bnx2x_link_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002449{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002450 /* Make sure that we are synced with the current statistics */
2451 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2452
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002453 bnx2x_link_update(&bp->link_params, &bp->link_vars);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002454
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002455 if (bp->link_vars.link_up) {
2456
Eilon Greenstein1c063282009-02-12 08:36:43 +00002457 /* dropless flow control */
Eilon Greensteina18f5122009-08-12 08:23:26 +00002458 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
Eilon Greenstein1c063282009-02-12 08:36:43 +00002459 int port = BP_PORT(bp);
2460 u32 pause_enabled = 0;
2461
2462 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2463 pause_enabled = 1;
2464
2465 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07002466 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
Eilon Greenstein1c063282009-02-12 08:36:43 +00002467 pause_enabled);
2468 }
2469
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002470 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2471 struct host_port_stats *pstats;
2472
2473 pstats = bnx2x_sp(bp, port_stats);
2474 /* reset old bmac stats */
2475 memset(&(pstats->mac_stx[0]), 0,
2476 sizeof(struct mac_stx));
2477 }
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002478 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002479 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2480 }
2481
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002482 /* indicate link status */
2483 bnx2x_link_report(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002484
2485 if (IS_E1HMF(bp)) {
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002486 int port = BP_PORT(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002487 int func;
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002488 int vn;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002489
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002490 /* Set the attention towards other drivers on the same port */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002491 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2492 if (vn == BP_E1HVN(bp))
2493 continue;
2494
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002495 func = ((vn << 1) | port);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002496 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2497 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2498 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002499
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002500 if (bp->link_vars.link_up) {
2501 int i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002502
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002503 /* Init rate shaping and fairness contexts */
2504 bnx2x_init_port_minmax(bp);
2505
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002506 for (vn = VN_0; vn < E1HVN_MAX; vn++)
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002507 bnx2x_init_vn_minmax(bp, 2*vn + port);
2508
2509 /* Store it to internal memory */
2510 for (i = 0;
2511 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2512 REG_WR(bp, BAR_XSTRORM_INTMEM +
2513 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2514 ((u32 *)(&bp->cmng))[i]);
2515 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002516 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002517}
2518
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002519static void bnx2x__link_status_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002520{
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002521 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002522 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002523
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002524 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2525
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002526 if (bp->link_vars.link_up)
2527 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2528 else
2529 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2530
Eilon Greenstein2691d512009-08-12 08:22:08 +00002531 bnx2x_calc_vn_weight_sum(bp);
2532
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002533 /* indicate link status */
2534 bnx2x_link_report(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002535}
2536
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002537static void bnx2x_pmf_update(struct bnx2x *bp)
2538{
2539 int port = BP_PORT(bp);
2540 u32 val;
2541
2542 bp->port.pmf = 1;
2543 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2544
2545 /* enable nig attention */
2546 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2547 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2548 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002549
2550 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002551}
2552
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002553/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002554
2555/* slow path */
2556
2557/*
2558 * General service functions
2559 */
2560
Eilon Greenstein2691d512009-08-12 08:22:08 +00002561/* send the MCP a request, block until there is a reply */
2562u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2563{
2564 int func = BP_FUNC(bp);
2565 u32 seq = ++bp->fw_seq;
2566 u32 rc = 0;
2567 u32 cnt = 1;
2568 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2569
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07002570 mutex_lock(&bp->fw_mb_mutex);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002571 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2572 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2573
2574 do {
2575 /* let the FW do it's magic ... */
2576 msleep(delay);
2577
2578 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2579
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07002580 /* Give the FW up to 5 second (500*10ms) */
2581 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
Eilon Greenstein2691d512009-08-12 08:22:08 +00002582
2583 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2584 cnt*delay, rc, seq);
2585
2586 /* is this a reply to our command? */
2587 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2588 rc &= FW_MSG_CODE_MASK;
2589 else {
2590 /* FW BUG! */
2591 BNX2X_ERR("FW failed to respond!\n");
2592 bnx2x_fw_dump(bp);
2593 rc = 0;
2594 }
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07002595 mutex_unlock(&bp->fw_mb_mutex);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002596
2597 return rc;
2598}
2599
2600static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
Michael Chane665bfd2009-10-10 13:46:54 +00002601static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002602static void bnx2x_set_rx_mode(struct net_device *dev);
2603
2604static void bnx2x_e1h_disable(struct bnx2x *bp)
2605{
2606 int port = BP_PORT(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002607
2608 netif_tx_disable(bp->dev);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002609
2610 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2611
Eilon Greenstein2691d512009-08-12 08:22:08 +00002612 netif_carrier_off(bp->dev);
2613}
2614
2615static void bnx2x_e1h_enable(struct bnx2x *bp)
2616{
2617 int port = BP_PORT(bp);
2618
2619 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2620
Eilon Greenstein2691d512009-08-12 08:22:08 +00002621 /* Tx queue should be only reenabled */
2622 netif_tx_wake_all_queues(bp->dev);
2623
Eilon Greenstein061bc702009-10-15 00:18:47 -07002624 /*
2625 * Should not call netif_carrier_on since it will be called if the link
2626 * is up when checking for link state
2627 */
Eilon Greenstein2691d512009-08-12 08:22:08 +00002628}
2629
/* Recompute the rate-shaping/fairness configuration after a DCC bandwidth
 * allocation change and, if this function is the PMF, push it to the other
 * functions on the port and to XSTORM internal memory.
 */
static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	/* only the PMF distributes the update to the rest of the port */
	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}
2663
/* Handle a DCC (Dynamic Configuration Change) event from the MCP.
 * Each handled bit is cleared from dcc_event; any bit still set at the end
 * means an unhandled request, reported to the MCP as DCC_FAILURE.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}
2700
Michael Chan28912902009-10-10 13:46:53 +00002701/* must be called under the spq lock */
2702static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2703{
2704 struct eth_spe *next_spe = bp->spq_prod_bd;
2705
2706 if (bp->spq_prod_bd == bp->spq_last_bd) {
2707 bp->spq_prod_bd = bp->spq;
2708 bp->spq_prod_idx = 0;
2709 DP(NETIF_MSG_TIMER, "end of spq\n");
2710 } else {
2711 bp->spq_prod_bd++;
2712 bp->spq_prod_idx++;
2713 }
2714 return next_spe;
2715}
2716
/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	/* publish the new producer index to the XSTORM firmware */
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	/* order the MMIO write before any subsequent MMIO from other CPUs */
	mmiowb();
}
2729
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002730/* the slow path queue is odd since completions arrive on the fastpath ring */
2731static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2732 u32 data_hi, u32 data_lo, int common)
2733{
Michael Chan28912902009-10-10 13:46:53 +00002734 struct eth_spe *spe;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002735
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002736 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2737 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002738 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2739 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2740 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2741
2742#ifdef BNX2X_STOP_ON_ERROR
2743 if (unlikely(bp->panic))
2744 return -EIO;
2745#endif
2746
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002747 spin_lock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002748
2749 if (!bp->spq_left) {
2750 BNX2X_ERR("BUG! SPQ ring full!\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002751 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002752 bnx2x_panic();
2753 return -EBUSY;
2754 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08002755
Michael Chan28912902009-10-10 13:46:53 +00002756 spe = bnx2x_sp_get_next(bp);
2757
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002758 /* CID needs port number to be encoded int it */
Michael Chan28912902009-10-10 13:46:53 +00002759 spe->hdr.conn_and_cmd_data =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002760 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2761 HW_CID(bp, cid)));
Michael Chan28912902009-10-10 13:46:53 +00002762 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002763 if (common)
Michael Chan28912902009-10-10 13:46:53 +00002764 spe->hdr.type |=
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002765 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2766
Michael Chan28912902009-10-10 13:46:53 +00002767 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2768 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002769
2770 bp->spq_left--;
2771
Michael Chan28912902009-10-10 13:46:53 +00002772 bnx2x_sp_prod_update(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002773 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002774 return 0;
2775}
2776
2777/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002778static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002779{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002780 u32 j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002781 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002782
2783 might_sleep();
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002784 for (j = 0; j < 1000; j++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002785 val = (1UL << 31);
2786 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2787 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2788 if (val & (1L << 31))
2789 break;
2790
2791 msleep(5);
2792 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002793 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002794 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002795 rc = -EBUSY;
2796 }
2797
2798 return rc;
2799}
2800
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002801/* release split MCP access lock register */
2802static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002803{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002804 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002805}
2806
2807static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2808{
2809 struct host_def_status_block *def_sb = bp->def_status_blk;
2810 u16 rc = 0;
2811
2812 barrier(); /* status block is written to by the chip */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002813 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2814 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2815 rc |= 1;
2816 }
2817 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2818 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2819 rc |= 2;
2820 }
2821 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2822 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2823 rc |= 4;
2824 }
2825 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2826 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2827 rc |= 8;
2828 }
2829 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2830 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2831 rc |= 16;
2832 }
2833 return rc;
2834}
2835
2836/*
2837 * slow path service functions
2838 */
2839
/*
 * Handle newly-asserted attention bits: mask them in the AEU, record them
 * in bp->attn_state, service the hard-wired attentions (NIG link change,
 * GPIOs, general attentions) and finally write the bits to the HC to latch
 * the assertion.
 *
 * @bp:       driver instance
 * @asserted: attention bits that have just become asserted
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	/* a bit cannot be asserted again before it was deasserted */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* mask the newly asserted bits in the AEU under the HW lock, since
	 * the mask register is shared with the MCP / other port */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1;
		 * clear the corresponding latch register for each */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2935
/*
 * Record a fan failure: latch a "failure" external PHY type into the
 * port's shared-memory HW configuration (so it persists across reloads)
 * and log an error on the netdev.
 */
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
	       "Please contact Dell Support for assistance.\n");
}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002950
/*
 * Handle deasserted attention bits routed through AEU register 0:
 * SPIO5 (fan failure), GPIO3 (module detect) and the fatal HW blocks in
 * HW_INTERRUT_ASSERT_SET_0.
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* mask SPIO5 in the AEU enable register so the (sticky)
		 * fan-failure attention does not fire again */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the fatal bits, then crash the driver */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
3014
/*
 * Handle deasserted attention bits routed through AEU register 1:
 * doorbell-queue (DORQ) errors and the fatal HW blocks in
 * HW_INTERRUT_ASSERT_SET_1.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* read-to-clear the DORQ interrupt status */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the fatal bits, then crash the driver */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
3045
/*
 * Handle deasserted attention bits routed through AEU register 2:
 * CFC and PXP errors and the fatal HW blocks in HW_INTERRUT_ASSERT_SET_2.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		/* read-to-clear the CFC interrupt status */
		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		/* read-to-clear the PXP interrupt status */
		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the fatal bits, then crash the driver */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
3085
/*
 * Handle deasserted attention bits routed through AEU register 3:
 * general attentions (PMF link/DCC events, MC and MCP asserts) and the
 * latched attentions (GRC timeout / reserved).
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* clear this function's general attention latch */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config = SHMEM_RD(bp,
					     mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			/* take over PMF duty if the MCP says so */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* acknowledge all latched attention signals */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3140
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003141static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3142static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3143
3144
/* generic POR-preserved register used to keep the "reset in progress" flag
 * and the global load counter across driver reloads; the low
 * LOAD_COUNTER_BITS hold the counter, the bits above it the reset flag */
#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3151/*
3152 * should be run under rtnl lock
3153 */
3154static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3155{
3156 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3157 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3158 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3159 barrier();
3160 mmiowb();
3161}
3162
3163/*
3164 * should be run under rtnl lock
3165 */
3166static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3167{
3168 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3169 val |= (1 << 16);
3170 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3171 barrier();
3172 mmiowb();
3173}
3174
3175/*
3176 * should be run under rtnl lock
3177 */
3178static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3179{
3180 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3181 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3182 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3183}
3184
3185/*
3186 * should be run under rtnl lock
3187 */
3188static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3189{
3190 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3191
3192 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3193
3194 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3195 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3196 barrier();
3197 mmiowb();
3198}
3199
3200/*
3201 * should be run under rtnl lock
3202 */
3203static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3204{
3205 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3206
3207 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3208
3209 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3210 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3211 barrier();
3212 mmiowb();
3213
3214 return val1;
3215}
3216
3217/*
3218 * should be run under rtnl lock
3219 */
3220static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3221{
3222 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3223}
3224
3225static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3226{
3227 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3228 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3229}
3230
/* Print one block name on the continued line, preceded by ", " for every
 * entry but the first (idx == 0). */
static inline void _print_next_block(int idx, const char *blk)
{
	pr_cont("%s%s", idx ? ", " : "", blk);
}
3237
3238static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3239{
3240 int i = 0;
3241 u32 cur_bit = 0;
3242 for (i = 0; sig; i++) {
3243 cur_bit = ((u32)0x1 << i);
3244 if (sig & cur_bit) {
3245 switch (cur_bit) {
3246 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3247 _print_next_block(par_num++, "BRB");
3248 break;
3249 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3250 _print_next_block(par_num++, "PARSER");
3251 break;
3252 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3253 _print_next_block(par_num++, "TSDM");
3254 break;
3255 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3256 _print_next_block(par_num++, "SEARCHER");
3257 break;
3258 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3259 _print_next_block(par_num++, "TSEMI");
3260 break;
3261 }
3262
3263 /* Clear the bit */
3264 sig &= ~cur_bit;
3265 }
3266 }
3267
3268 return par_num;
3269}
3270
3271static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3272{
3273 int i = 0;
3274 u32 cur_bit = 0;
3275 for (i = 0; sig; i++) {
3276 cur_bit = ((u32)0x1 << i);
3277 if (sig & cur_bit) {
3278 switch (cur_bit) {
3279 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3280 _print_next_block(par_num++, "PBCLIENT");
3281 break;
3282 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3283 _print_next_block(par_num++, "QM");
3284 break;
3285 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3286 _print_next_block(par_num++, "XSDM");
3287 break;
3288 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3289 _print_next_block(par_num++, "XSEMI");
3290 break;
3291 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3292 _print_next_block(par_num++, "DOORBELLQ");
3293 break;
3294 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3295 _print_next_block(par_num++, "VAUX PCI CORE");
3296 break;
3297 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3298 _print_next_block(par_num++, "DEBUG");
3299 break;
3300 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3301 _print_next_block(par_num++, "USDM");
3302 break;
3303 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3304 _print_next_block(par_num++, "USEMI");
3305 break;
3306 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3307 _print_next_block(par_num++, "UPB");
3308 break;
3309 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3310 _print_next_block(par_num++, "CSDM");
3311 break;
3312 }
3313
3314 /* Clear the bit */
3315 sig &= ~cur_bit;
3316 }
3317 }
3318
3319 return par_num;
3320}
3321
3322static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3323{
3324 int i = 0;
3325 u32 cur_bit = 0;
3326 for (i = 0; sig; i++) {
3327 cur_bit = ((u32)0x1 << i);
3328 if (sig & cur_bit) {
3329 switch (cur_bit) {
3330 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3331 _print_next_block(par_num++, "CSEMI");
3332 break;
3333 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3334 _print_next_block(par_num++, "PXP");
3335 break;
3336 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3337 _print_next_block(par_num++,
3338 "PXPPCICLOCKCLIENT");
3339 break;
3340 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3341 _print_next_block(par_num++, "CFC");
3342 break;
3343 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3344 _print_next_block(par_num++, "CDU");
3345 break;
3346 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3347 _print_next_block(par_num++, "IGU");
3348 break;
3349 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3350 _print_next_block(par_num++, "MISC");
3351 break;
3352 }
3353
3354 /* Clear the bit */
3355 sig &= ~cur_bit;
3356 }
3357 }
3358
3359 return par_num;
3360}
3361
3362static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3363{
3364 int i = 0;
3365 u32 cur_bit = 0;
3366 for (i = 0; sig; i++) {
3367 cur_bit = ((u32)0x1 << i);
3368 if (sig & cur_bit) {
3369 switch (cur_bit) {
3370 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3371 _print_next_block(par_num++, "MCP ROM");
3372 break;
3373 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3374 _print_next_block(par_num++, "MCP UMP RX");
3375 break;
3376 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3377 _print_next_block(par_num++, "MCP UMP TX");
3378 break;
3379 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3380 _print_next_block(par_num++, "MCP SCPAD");
3381 break;
3382 }
3383
3384 /* Clear the bit */
3385 sig &= ~cur_bit;
3386 }
3387 }
3388
3389 return par_num;
3390}
3391
3392static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3393 u32 sig2, u32 sig3)
3394{
3395 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3396 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3397 int par_num = 0;
3398 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3399 "[0]:0x%08x [1]:0x%08x "
3400 "[2]:0x%08x [3]:0x%08x\n",
3401 sig0 & HW_PRTY_ASSERT_SET_0,
3402 sig1 & HW_PRTY_ASSERT_SET_1,
3403 sig2 & HW_PRTY_ASSERT_SET_2,
3404 sig3 & HW_PRTY_ASSERT_SET_3);
3405 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3406 bp->dev->name);
3407 par_num = bnx2x_print_blocks_with_parity0(
3408 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3409 par_num = bnx2x_print_blocks_with_parity1(
3410 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3411 par_num = bnx2x_print_blocks_with_parity2(
3412 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3413 par_num = bnx2x_print_blocks_with_parity3(
3414 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3415 printk("\n");
3416 return true;
3417 } else
3418 return false;
3419}
3420
3421static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003422{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003423 struct attn_route attn;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003424 int port = BP_PORT(bp);
3425
3426 attn.sig[0] = REG_RD(bp,
3427 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3428 port*4);
3429 attn.sig[1] = REG_RD(bp,
3430 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3431 port*4);
3432 attn.sig[2] = REG_RD(bp,
3433 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3434 port*4);
3435 attn.sig[3] = REG_RD(bp,
3436 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3437 port*4);
3438
3439 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3440 attn.sig[3]);
3441}
3442
/*
 * Handle attention bits that have just been deasserted: check for parity
 * errors first (and if found, kick off the recovery flow instead of
 * normal handling), dispatch the per-group deasserted handlers, then
 * unmask the bits in the AEU and update bp->attn_state.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	/* run the four per-signal handlers for every deasserted group,
	 * masked by that group's configured attention routing */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	/* clear the deassertion in the HC */
	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	/* a bit must have been asserted before it can deassert */
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* unmask the deasserted bits in the AEU under the shared HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3525
3526static void bnx2x_attn_int(struct bnx2x *bp)
3527{
3528 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003529 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3530 attn_bits);
3531 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3532 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003533 u32 attn_state = bp->attn_state;
3534
3535 /* look for changed bits */
3536 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3537 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3538
3539 DP(NETIF_MSG_HW,
3540 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3541 attn_bits, attn_ack, asserted, deasserted);
3542
3543 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003544 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003545
3546 /* handle bits that were raised */
3547 if (asserted)
3548 bnx2x_attn_int_asserted(bp, asserted);
3549
3550 if (deasserted)
3551 bnx2x_attn_int_deasserted(bp, deasserted);
3552}
3553
/*
 * Slow-path work handler: processes default-status-block updates (HW
 * attentions) and re-acks the per-storm default SB indices in the IGU.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;


	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* ack each storm's default SB index; only the final (TSTORM) ack
	 * re-enables the interrupt */
	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}
3588
/*
 * MSI-X slow-path interrupt handler: disables further slow-path
 * interrupts in the IGU, forwards the event to CNIC (if configured) and
 * defers the actual processing to the sp_task work item.
 */
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* disable slow-path interrupts until sp_task re-enables them */
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* notify the CNIC driver under RCU protection */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3622
3623/* end of slow path */
3624
3625/* Statistics */
3626
3627/****************************************************************************
3628* Macros
3629****************************************************************************/
3630
/* sum[hi:lo] += add[hi:lo] — 64-bit addition over two 32-bit halves,
 * carrying into the high half when the unsigned low-half add wraps */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + (s_lo < a_lo); \
	} while (0)
3637
/* difference = minuend - subtrahend, computed over [hi:lo] 32-bit pairs
 * with borrow handling; when the subtraction would go negative the result
 * is forced to zero (see the inline comments for the exact cases).
 * NOTE(review): the d_hi > 0 test assumes the hi fields behave as intended
 * for the stats types used at the expansion sites. */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
3665
/* update a 64-bit MAC statistic: mac_stx[0].t mirrors the latest raw HW
 * value of counter s, while mac_stx[1].t accumulates the deltas; relies on
 * locals 'new', 'pstats' and 'diff' existing at the expansion site */
#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)
3675
/* accumulate the delta of NIG counter s into estats->t; relies on locals
 * 'new', 'old', 'diff' and 'estats' existing at the expansion site */
#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)
3683
/* sum[hi:lo] += add — extend a 32-bit addend into a split 64-bit sum,
 * propagating the carry out of the low word into the high word. */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		/* unsigned wrap-around: sum < addend means a carry */ \
		if (s_lo < a) \
			s_hi++; \
	} while (0)
3690
/* Extend a raw 32-bit MAC counter 's' into the 64-bit accumulated
 * mac_stx[1] statistics.  Relies on 'pstats' and 'new' being in scope
 * at the call site.
 */
#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)
3697
/* Fold the delta of a little-endian TSTORM per-client counter into a
 * 64-bit queue statistic, remembering the new raw value for the next
 * round.  Relies on 'tclient', 'old_tclient', 'qstats' and 'diff'.
 */
#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3704
/* Same as UPDATE_EXTEND_TSTAT but for the USTORM per-client counters.
 * Relies on 'uclient', 'old_uclient', 'qstats' and 'diff' in scope.
 */
#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3711
/* Same as UPDATE_EXTEND_TSTAT but for the XSTORM per-client counters.
 * Relies on 'xclient', 'old_xclient', 'qstats' and 'diff' in scope.
 */
#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3718
/* minuend -= subtrahend, in place, on a split 64-bit value
 * (thin wrapper around DIFF_64 with destination == minuend) */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)
3724
/* minuend[hi:lo] -= subtrahend, where the subtrahend is a plain 32-bit
 * value (high word of the subtrahend is 0) */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)
3730
/* Subtract the delta of a USTORM per-client counter from a 64-bit queue
 * statistic.  Note: unlike UPDATE_EXTEND_USTAT, the old raw value is
 * deliberately NOT updated here, so a following UPDATE_EXTEND_USTAT on
 * the same counter sees the same delta.  Relies on 'uclient',
 * 'old_uclient', 'qstats' and 'diff' in scope.
 */
#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3736
3737/*
3738 * General service functions
3739 */
3740
3741static inline long bnx2x_hilo(u32 *hiref)
3742{
3743 u32 lo = *(hiref + 1);
3744#if (BITS_PER_LONG == 64)
3745 u32 hi = *hiref;
3746
3747 return HILO_U64(hi, lo);
3748#else
3749 return lo;
3750#endif
3751}
3752
3753/*
3754 * Init service functions
3755 */
3756
/* Request a fresh snapshot of the per-queue storm statistics from the
 * firmware via a slowpath STAT_QUERY ramrod.  Does nothing while a
 * previous query is still outstanding (stats_pending).
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		/* tag the query so replies can be matched to this round */
		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		/* bitmap of client ids whose stats should be collected */
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has it's own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003778
/* Kick off the hardware (DMAE) part of a statistics round.  If a chain
 * of DMAE commands was programmed (executer_idx != 0), build a "loader"
 * command that DMAs the first real command into the DMAE command memory
 * and triggers it; otherwise post the single function-stats command.
 * Completion is signalled by stats_comp being rewritten to DMAE_COMP_VAL.
 */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* pre-mark as complete; cleared below only if a DMA is posted */
	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		/* copy the first programmed command into the next DMAE
		 * command slot, then let its completion GO-bit run it */
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
3826
3827static int bnx2x_stats_comp(struct bnx2x *bp)
3828{
3829 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3830 int cnt = 10;
3831
3832 might_sleep();
3833 while (*stats_comp != DMAE_COMP_VAL) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003834 if (!cnt) {
3835 BNX2X_ERR("timeout waiting for stats finished\n");
3836 break;
3837 }
3838 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -07003839 msleep(1);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003840 }
3841 return 1;
3842}
3843
3844/*
3845 * Statistics service functions
3846 */
3847
/* Pull the current port statistics block from shared memory (port_stx)
 * into the driver's host buffer via two DMAE reads (the block is larger
 * than one DMAE transfer, DMAE_LEN32_RD_MAX dwords), and wait for
 * completion.  Only valid for the E1H multi-function PMF instance.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* common opcode: GRC -> PCI read; completion target set per command */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	/* first chunk: completion chains to the next command (C_DST_GRC) */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second chunk: the remainder; completion writes stats_comp */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3902
/* Program the full DMAE command chain for a PMF statistics round:
 *  - write-back of host port/function stats to shared memory (for MCP),
 *  - read of the active MAC's (BMAC or EMAC) hardware counters,
 *  - read of the NIG block counters.
 * All commands but the last chain to each other via GO-bit completions;
 * the last one signals stats_comp.  Nothing is triggered here - the
 * chain is executed later by bnx2x_hw_stats_post().
 */
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	/* publish host port stats to shared memory, if a slot exists */
	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* publish host function stats to shared memory, if a slot exists */
	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* last command: completion lands in stats_comp (C_DST_PCI) */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
4110
/* Program the single DMAE command used by non-PMF functions: copy this
 * function's host statistics block out to its shared-memory slot
 * (func_stx).  The command is executed later by bnx2x_hw_stats_post().
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	/* completion is written straight to stats_comp */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
4146
/* Begin a statistics collection round: program the DMAE descriptors
 * (the full port chain if we are the PMF, otherwise just the
 * per-function write-back), then trigger both the hardware DMA and
 * the firmware storm-statistics query.
 */
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
4158
/* PMF statistics (re)start: wait out any DMAE transfer in flight, pull
 * the current port statistics from shared memory, then start a regular
 * collection round.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
4165
/* Restart the statistics cycle: wait for any outstanding DMAE
 * completion, then kick off a fresh collection round.
 */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004171
/* Fold the freshly DMAed BMAC hardware counters into the 64-bit
 * accumulated port statistics (see UPDATE_STAT64: mac_stx[0] = raw
 * snapshot, mac_stx[1] = accumulated), and mirror the pause-frame
 * counters into the ethernet stats.  'new', 'pstats', 'estats' and
 * 'diff' are the implicit operands of the UPDATE_STAT64 macros.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch delta used by UPDATE_STAT64/DIFF_64 */
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	/* same hardware counter feeds two driver statistics */
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	/* export accumulated pause counters into the ethernet stats */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
4222
/* Fold the freshly DMAed EMAC hardware counters into the 64-bit
 * accumulated port statistics (UPDATE_EXTEND_STAT extends each raw
 * 32-bit counter into mac_stx[1]), and derive the pause-frame totals
 * (xon + xoff) for the ethernet stats.  'new' and 'pstats' are the
 * implicit operands of the UPDATE_EXTEND_STAT macros.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause frames received = xon received + xoff received */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause frames sent = xon sent + xoff sent */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
4279
/* Consume the results of a completed hardware statistics DMA: update
 * the MAC statistics via the type-specific helper, fold in the NIG
 * block deltas, copy the accumulated values into the driver's ethernet
 * stats, and bump the start/end markers used to detect torn reads of
 * the port stats block.  Returns 0 on success, -1 if no MAC was active
 * (which should be unreachable after a DMAE completion).
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch delta used by UPDATE_STAT64_NIG/DIFF_64 */
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	/* extend the 32-bit BRB counters by their delta since last round */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	/* remember this snapshot for the next delta computation */
	memcpy(old, new, sizeof(struct nig_stats));

	/* mirror the accumulated MAC statistics into the ethernet stats */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	/* matching start/end markers let readers detect a torn update */
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
4329
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004330static int bnx2x_storm_stats_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004331{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004332 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004333 struct tstorm_per_port_stats *tport =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004334 &stats->tstorm_common.port_statistics;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004335 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4336 struct bnx2x_eth_stats *estats = &bp->eth_stats;
Eilon Greensteinde832a52009-02-12 08:36:33 +00004337 int i;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004338
Eilon Greenstein6fe49bb2009-08-12 08:23:17 +00004339 memcpy(&(fstats->total_bytes_received_hi),
4340 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
Eilon Greensteinde832a52009-02-12 08:36:33 +00004341 sizeof(struct host_func_stats) - 2*sizeof(u32));
4342 estats->error_bytes_received_hi = 0;
4343 estats->error_bytes_received_lo = 0;
4344 estats->etherstatsoverrsizepkts_hi = 0;
4345 estats->etherstatsoverrsizepkts_lo = 0;
4346 estats->no_buff_discard_hi = 0;
4347 estats->no_buff_discard_lo = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004348
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004349 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +00004350 struct bnx2x_fastpath *fp = &bp->fp[i];
4351 int cl_id = fp->cl_id;
4352 struct tstorm_per_client_stats *tclient =
4353 &stats->tstorm_common.client_statistics[cl_id];
4354 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4355 struct ustorm_per_client_stats *uclient =
4356 &stats->ustorm_common.client_statistics[cl_id];
4357 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4358 struct xstorm_per_client_stats *xclient =
4359 &stats->xstorm_common.client_statistics[cl_id];
4360 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4361 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4362 u32 diff;
4363
4364 /* are storm stats valid? */
4365 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4366 bp->stats_counter) {
4367 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4368 " xstorm counter (%d) != stats_counter (%d)\n",
4369 i, xclient->stats_counter, bp->stats_counter);
4370 return -1;
4371 }
4372 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4373 bp->stats_counter) {
4374 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4375 " tstorm counter (%d) != stats_counter (%d)\n",
4376 i, tclient->stats_counter, bp->stats_counter);
4377 return -2;
4378 }
4379 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4380 bp->stats_counter) {
4381 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4382 " ustorm counter (%d) != stats_counter (%d)\n",
4383 i, uclient->stats_counter, bp->stats_counter);
4384 return -4;
4385 }
4386
4387 qstats->total_bytes_received_hi =
Eilon Greensteinca003922009-08-12 22:53:28 -07004388 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004389 qstats->total_bytes_received_lo =
Eilon Greensteinca003922009-08-12 22:53:28 -07004390 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4391
4392 ADD_64(qstats->total_bytes_received_hi,
4393 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4394 qstats->total_bytes_received_lo,
4395 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4396
4397 ADD_64(qstats->total_bytes_received_hi,
4398 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4399 qstats->total_bytes_received_lo,
4400 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4401
4402 qstats->valid_bytes_received_hi =
4403 qstats->total_bytes_received_hi;
Eilon Greensteinde832a52009-02-12 08:36:33 +00004404 qstats->valid_bytes_received_lo =
Eilon Greensteinca003922009-08-12 22:53:28 -07004405 qstats->total_bytes_received_lo;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004406
Eilon Greensteinde832a52009-02-12 08:36:33 +00004407 qstats->error_bytes_received_hi =
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004408 le32_to_cpu(tclient->rcv_error_bytes.hi);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004409 qstats->error_bytes_received_lo =
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004410 le32_to_cpu(tclient->rcv_error_bytes.lo);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004411
4412 ADD_64(qstats->total_bytes_received_hi,
4413 qstats->error_bytes_received_hi,
4414 qstats->total_bytes_received_lo,
4415 qstats->error_bytes_received_lo);
4416
4417 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4418 total_unicast_packets_received);
4419 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4420 total_multicast_packets_received);
4421 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4422 total_broadcast_packets_received);
4423 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4424 etherstatsoverrsizepkts);
4425 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4426
4427 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4428 total_unicast_packets_received);
4429 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4430 total_multicast_packets_received);
4431 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4432 total_broadcast_packets_received);
4433 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4434 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4435 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4436
4437 qstats->total_bytes_transmitted_hi =
Eilon Greensteinca003922009-08-12 22:53:28 -07004438 le32_to_cpu(xclient->unicast_bytes_sent.hi);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004439 qstats->total_bytes_transmitted_lo =
Eilon Greensteinca003922009-08-12 22:53:28 -07004440 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4441
4442 ADD_64(qstats->total_bytes_transmitted_hi,
4443 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4444 qstats->total_bytes_transmitted_lo,
4445 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4446
4447 ADD_64(qstats->total_bytes_transmitted_hi,
4448 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4449 qstats->total_bytes_transmitted_lo,
4450 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
Eilon Greensteinde832a52009-02-12 08:36:33 +00004451
4452 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4453 total_unicast_packets_transmitted);
4454 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4455 total_multicast_packets_transmitted);
4456 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4457 total_broadcast_packets_transmitted);
4458
4459 old_tclient->checksum_discard = tclient->checksum_discard;
4460 old_tclient->ttl0_discard = tclient->ttl0_discard;
4461
4462 ADD_64(fstats->total_bytes_received_hi,
4463 qstats->total_bytes_received_hi,
4464 fstats->total_bytes_received_lo,
4465 qstats->total_bytes_received_lo);
4466 ADD_64(fstats->total_bytes_transmitted_hi,
4467 qstats->total_bytes_transmitted_hi,
4468 fstats->total_bytes_transmitted_lo,
4469 qstats->total_bytes_transmitted_lo);
4470 ADD_64(fstats->total_unicast_packets_received_hi,
4471 qstats->total_unicast_packets_received_hi,
4472 fstats->total_unicast_packets_received_lo,
4473 qstats->total_unicast_packets_received_lo);
4474 ADD_64(fstats->total_multicast_packets_received_hi,
4475 qstats->total_multicast_packets_received_hi,
4476 fstats->total_multicast_packets_received_lo,
4477 qstats->total_multicast_packets_received_lo);
4478 ADD_64(fstats->total_broadcast_packets_received_hi,
4479 qstats->total_broadcast_packets_received_hi,
4480 fstats->total_broadcast_packets_received_lo,
4481 qstats->total_broadcast_packets_received_lo);
4482 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4483 qstats->total_unicast_packets_transmitted_hi,
4484 fstats->total_unicast_packets_transmitted_lo,
4485 qstats->total_unicast_packets_transmitted_lo);
4486 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4487 qstats->total_multicast_packets_transmitted_hi,
4488 fstats->total_multicast_packets_transmitted_lo,
4489 qstats->total_multicast_packets_transmitted_lo);
4490 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4491 qstats->total_broadcast_packets_transmitted_hi,
4492 fstats->total_broadcast_packets_transmitted_lo,
4493 qstats->total_broadcast_packets_transmitted_lo);
4494 ADD_64(fstats->valid_bytes_received_hi,
4495 qstats->valid_bytes_received_hi,
4496 fstats->valid_bytes_received_lo,
4497 qstats->valid_bytes_received_lo);
4498
4499 ADD_64(estats->error_bytes_received_hi,
4500 qstats->error_bytes_received_hi,
4501 estats->error_bytes_received_lo,
4502 qstats->error_bytes_received_lo);
4503 ADD_64(estats->etherstatsoverrsizepkts_hi,
4504 qstats->etherstatsoverrsizepkts_hi,
4505 estats->etherstatsoverrsizepkts_lo,
4506 qstats->etherstatsoverrsizepkts_lo);
4507 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4508 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4509 }
4510
4511 ADD_64(fstats->total_bytes_received_hi,
4512 estats->rx_stat_ifhcinbadoctets_hi,
4513 fstats->total_bytes_received_lo,
4514 estats->rx_stat_ifhcinbadoctets_lo);
4515
4516 memcpy(estats, &(fstats->total_bytes_received_hi),
4517 sizeof(struct host_func_stats) - 2*sizeof(u32));
4518
4519 ADD_64(estats->etherstatsoverrsizepkts_hi,
4520 estats->rx_stat_dot3statsframestoolong_hi,
4521 estats->etherstatsoverrsizepkts_lo,
4522 estats->rx_stat_dot3statsframestoolong_lo);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004523 ADD_64(estats->error_bytes_received_hi,
4524 estats->rx_stat_ifhcinbadoctets_hi,
4525 estats->error_bytes_received_lo,
4526 estats->rx_stat_ifhcinbadoctets_lo);
4527
Eilon Greensteinde832a52009-02-12 08:36:33 +00004528 if (bp->port.pmf) {
4529 estats->mac_filter_discard =
4530 le32_to_cpu(tport->mac_filter_discard);
4531 estats->xxoverflow_discard =
4532 le32_to_cpu(tport->xxoverflow_discard);
4533 estats->brb_truncate_discard =
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004534 le32_to_cpu(tport->brb_truncate_discard);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004535 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4536 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004537
4538 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4539
Eilon Greensteinde832a52009-02-12 08:36:33 +00004540 bp->stats_pending = 0;
4541
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004542 return 0;
4543}
4544
4545static void bnx2x_net_stats_update(struct bnx2x *bp)
4546{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004547 struct bnx2x_eth_stats *estats = &bp->eth_stats;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004548 struct net_device_stats *nstats = &bp->dev->stats;
Eilon Greensteinde832a52009-02-12 08:36:33 +00004549 int i;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004550
4551 nstats->rx_packets =
4552 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4553 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4554 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4555
4556 nstats->tx_packets =
4557 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4558 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4559 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4560
Eilon Greensteinde832a52009-02-12 08:36:33 +00004561 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004562
Eliezer Tamir0e39e642008-02-28 11:54:03 -08004563 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004564
Eilon Greensteinde832a52009-02-12 08:36:33 +00004565 nstats->rx_dropped = estats->mac_discard;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004566 for_each_queue(bp, i)
Eilon Greensteinde832a52009-02-12 08:36:33 +00004567 nstats->rx_dropped +=
4568 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4569
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004570 nstats->tx_dropped = 0;
4571
4572 nstats->multicast =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004573 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004574
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004575 nstats->collisions =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004576 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004577
4578 nstats->rx_length_errors =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004579 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4580 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4581 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4582 bnx2x_hilo(&estats->brb_truncate_hi);
4583 nstats->rx_crc_errors =
4584 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4585 nstats->rx_frame_errors =
4586 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4587 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004588 nstats->rx_missed_errors = estats->xxoverflow_discard;
4589
4590 nstats->rx_errors = nstats->rx_length_errors +
4591 nstats->rx_over_errors +
4592 nstats->rx_crc_errors +
4593 nstats->rx_frame_errors +
Eliezer Tamir0e39e642008-02-28 11:54:03 -08004594 nstats->rx_fifo_errors +
4595 nstats->rx_missed_errors;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004596
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004597 nstats->tx_aborted_errors =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004598 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4599 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4600 nstats->tx_carrier_errors =
4601 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004602 nstats->tx_fifo_errors = 0;
4603 nstats->tx_heartbeat_errors = 0;
4604 nstats->tx_window_errors = 0;
4605
4606 nstats->tx_errors = nstats->tx_aborted_errors +
Eilon Greensteinde832a52009-02-12 08:36:33 +00004607 nstats->tx_carrier_errors +
4608 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4609}
4610
4611static void bnx2x_drv_stats_update(struct bnx2x *bp)
4612{
4613 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4614 int i;
4615
4616 estats->driver_xoff = 0;
4617 estats->rx_err_discard_pkt = 0;
4618 estats->rx_skb_alloc_failed = 0;
4619 estats->hw_csum_err = 0;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004620 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +00004621 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4622
4623 estats->driver_xoff += qstats->driver_xoff;
4624 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4625 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4626 estats->hw_csum_err += qstats->hw_csum_err;
4627 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004628}
4629
/* Statistics update cycle: verify the previous DMAE pass completed, pull
 * in the HW (PMF only) and storm statistics, refresh the netdev/driver
 * counters, optionally dump debug state, then post the next request.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* previous DMAE transfer not yet complete - try again next tick */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	/* only the port-master function (PMF) owns the HW port stats */
	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* storm stats lagging for 4 consecutive updates is fatal */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* verbose debug dump, gated on the netif timer message level */
	if (netif_msg_timer(bp)) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = bp->fp;
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		netdev_printk(KERN_DEBUG, bp->dev, "\n");
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
		       " tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
		       " rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
		       "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %lu no_buff_discard %lu "
			"mac_discard %u mac_filter_discard %u "
			"xxovrflow_discard %u brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	/* kick off the next HW and storm statistics collection */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004697
/* Build the final DMAE commands that copy the driver's port and function
 * statistics buffers (host memory, PCI side) back out to the shmem
 * addresses (port_stx/func_stx) so management FW sees the last values.
 * Commands are queued via bp->executer_idx; the caller posts them.
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* common opcode: PCI (host buffer) -> GRC (shmem) */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* if a func-stats command follows, chain completion through
		 * GRC (loader); otherwise complete straight to stats_comp */
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		/* function stats go last and signal stats_comp on completion */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
4761
/* Stop statistics collection: wait for the in-flight DMAE to complete,
 * take one final HW/storm snapshot, and (for the PMF) flush the last
 * port/function stats out to shmem.
 */
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	/* wait for any outstanding stats DMAE to finish */
	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	/* only publish if at least one of the updates succeeded */
	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}
4783
/* No-op action for state-machine transitions that require no work */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
4787
/* Statistics state machine: indexed by [current state][event], each entry
 * gives the action to run and the state to move to (see bnx2x_stats_handle).
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
4806
/* Drive the statistics state machine: run the action for the current
 * (state, event) pair and advance to the table's next state.
 */
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	/* UPDATE events fire every timer tick - only log them when the
	 * timer debug level is enabled, to avoid log flooding */
	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
4821
/* PMF only: write the current host port-stats buffer out to the shmem
 * port_stx area via a single DMAE command, establishing the baseline,
 * and wait for the transfer to complete.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* PCI (host port_stats buffer) -> GRC (shmem port_stx) */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4859
/* PMF only: initialize the function-stats base for every vnic on this
 * port by temporarily pointing bp->func_stx at each function's shmem
 * mailbox address and running a stats init/post/complete cycle.
 */
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		/* functions are interleaved across the two ports */
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}
4888
/* Non-PMF path: read the function-stats base back from the shmem
 * func_stx area into host memory (func_stats_base) via DMAE, opposite
 * direction to bnx2x_port_stats_base_init(), and wait for completion.
 */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* GRC (shmem func_stx) -> PCI (host func_stats_base buffer) */
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4926
/* One-time statistics initialization: read the shmem stats addresses,
 * snapshot the NIG baseline counters, zero all per-queue and aggregate
 * stats, and establish the port/function stats bases per PMF role.
 */
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		/* no MCP - no shmem stats areas */
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats: capture current NIG counters as the baseline */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	/* the PMF seeds the shmem areas; others read their base from it */
	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}
4988
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004989static void bnx2x_timer(unsigned long data)
4990{
4991 struct bnx2x *bp = (struct bnx2x *) data;
4992
4993 if (!netif_running(bp->dev))
4994 return;
4995
4996 if (atomic_read(&bp->intr_sem) != 0)
Eliezer Tamirf1410642008-02-28 11:51:50 -08004997 goto timer_restart;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004998
4999 if (poll) {
5000 struct bnx2x_fastpath *fp = &bp->fp[0];
5001 int rc;
5002
Eilon Greenstein7961f792009-03-02 07:59:31 +00005003 bnx2x_tx_int(fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005004 rc = bnx2x_rx_int(fp, 1000);
5005 }
5006
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005007 if (!BP_NOMCP(bp)) {
5008 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005009 u32 drv_pulse;
5010 u32 mcp_pulse;
5011
5012 ++bp->fw_drv_pulse_wr_seq;
5013 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5014 /* TBD - add SYSTEM_TIME */
5015 drv_pulse = bp->fw_drv_pulse_wr_seq;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005016 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005017
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005018 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005019 MCP_PULSE_SEQ_MASK);
5020 /* The delta between driver pulse and mcp response
5021 * should be 1 (before mcp response) or 0 (after mcp response)
5022 */
5023 if ((drv_pulse != mcp_pulse) &&
5024 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5025 /* someone lost a heartbeat... */
5026 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5027 drv_pulse, mcp_pulse);
5028 }
5029 }
5030
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07005031 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07005032 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005033
Eliezer Tamirf1410642008-02-28 11:51:50 -08005034timer_restart:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005035 mod_timer(&bp->timer, jiffies + bp->current_interval);
5036}
5037
5038/* end of Statistics */
5039
5040/* nic init */
5041
5042/*
5043 * nic init service functions
5044 */
5045
/* Clear the USTORM and CSTORM portions of a non-default status block
 * (both live in CSTORM fast memory on this FW) for the given port/sb_id.
 */
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}
5058
/* Program a non-default status block: write its host DMA address and
 * owning function into CSTORM internal memory for both the USTORM and
 * CSTORM sections, disable host-coalescing on every index, then ack
 * the status block to enable IGU interrupts for it.
 *
 * @sb:      host-side status block to initialize
 * @mapping: its DMA (bus) address
 * @sb_id:   status block index in HW
 */
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	/* 64-bit section address is written as two 32-bit halves */
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	/* write 1 to each HC_DISABLE entry (coalescing off per index) */
	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
5103
/* Clear all four storm sections (TSTORM, CSTORM U/C, XSTORM) of this
 * function's default status block in the storms' fast memory.
 */
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
5121
5122static void bnx2x_init_def_sb(struct bnx2x *bp,
5123 struct host_def_status_block *def_sb,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005124 dma_addr_t mapping, int sb_id)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005125{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005126 int port = BP_PORT(bp);
5127 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005128 int index, val, reg_offset;
5129 u64 section;
5130
5131 /* ATTN */
5132 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5133 atten_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005134 def_sb->atten_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005135
Eliezer Tamir49d66772008-02-28 11:53:13 -08005136 bp->attn_state = 0;
5137
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005138 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5139 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5140
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005141 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005142 bp->attn_group[index].sig[0] = REG_RD(bp,
5143 reg_offset + 0x10*index);
5144 bp->attn_group[index].sig[1] = REG_RD(bp,
5145 reg_offset + 0x4 + 0x10*index);
5146 bp->attn_group[index].sig[2] = REG_RD(bp,
5147 reg_offset + 0x8 + 0x10*index);
5148 bp->attn_group[index].sig[3] = REG_RD(bp,
5149 reg_offset + 0xc + 0x10*index);
5150 }
5151
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005152 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5153 HC_REG_ATTN_MSG0_ADDR_L);
5154
5155 REG_WR(bp, reg_offset, U64_LO(section));
5156 REG_WR(bp, reg_offset + 4, U64_HI(section));
5157
5158 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5159
5160 val = REG_RD(bp, reg_offset);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005161 val |= sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005162 REG_WR(bp, reg_offset, val);
5163
5164 /* USTORM */
5165 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5166 u_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005167 def_sb->u_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005168
Eilon Greensteinca003922009-08-12 22:53:28 -07005169 REG_WR(bp, BAR_CSTRORM_INTMEM +
5170 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5171 REG_WR(bp, BAR_CSTRORM_INTMEM +
5172 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005173 U64_HI(section));
Eilon Greensteinca003922009-08-12 22:53:28 -07005174 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5175 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005176
5177 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
Eilon Greensteinca003922009-08-12 22:53:28 -07005178 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5179 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005180
5181 /* CSTORM */
5182 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5183 c_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005184 def_sb->c_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005185
5186 REG_WR(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07005187 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005188 REG_WR(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07005189 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005190 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07005191 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
Eilon Greensteinca003922009-08-12 22:53:28 -07005192 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005193
5194 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5195 REG_WR16(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07005196 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005197
5198 /* TSTORM */
5199 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5200 t_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005201 def_sb->t_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005202
5203 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005204 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005205 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005206 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005207 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07005208 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005209 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005210
5211 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5212 REG_WR16(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005213 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005214
5215 /* XSTORM */
5216 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5217 x_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005218 def_sb->x_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005219
5220 REG_WR(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005221 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005222 REG_WR(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005223 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005224 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07005225 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005226 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005227
5228 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5229 REG_WR16(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005230 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005231
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07005232 bp->stats_pending = 0;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07005233 bp->set_mac_pending = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07005234
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005235 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005236}
5237
5238static void bnx2x_update_coalesce(struct bnx2x *bp)
5239{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005240 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005241 int i;
5242
5243 for_each_queue(bp, i) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005244 int sb_id = bp->fp[i].sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005245
5246 /* HC_INDEX_U_ETH_RX_CQ_CONS */
Eilon Greensteinca003922009-08-12 22:53:28 -07005247 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5248 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5249 U_SB_ETH_RX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00005250 bp->rx_ticks/(4 * BNX2X_BTR));
Eilon Greensteinca003922009-08-12 22:53:28 -07005251 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5252 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5253 U_SB_ETH_RX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00005254 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005255
5256 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5257 REG_WR8(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07005258 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5259 C_SB_ETH_TX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00005260 bp->tx_ticks/(4 * BNX2X_BTR));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005261 REG_WR16(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07005262 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5263 C_SB_ETH_TX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00005264 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005265 }
5266}
5267
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005268static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5269 struct bnx2x_fastpath *fp, int last)
5270{
5271 int i;
5272
5273 for (i = 0; i < last; i++) {
5274 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5275 struct sk_buff *skb = rx_buf->skb;
5276
5277 if (skb == NULL) {
5278 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5279 continue;
5280 }
5281
5282 if (fp->tpa_state[i] == BNX2X_TPA_START)
FUJITA Tomonori1a983142010-04-04 01:51:03 +00005283 dma_unmap_single(&bp->pdev->dev,
5284 dma_unmap_addr(rx_buf, mapping),
5285 bp->rx_buf_size, DMA_FROM_DEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005286
5287 dev_kfree_skb(skb);
5288 rx_buf->skb = NULL;
5289 }
5290}
5291
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005292static void bnx2x_init_rx_rings(struct bnx2x *bp)
5293{
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005294 int func = BP_FUNC(bp);
Eilon Greenstein32626232008-08-13 15:51:07 -07005295 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5296 ETH_MAX_AGGREGATION_QUEUES_E1H;
5297 u16 ring_prod, cqe_ring_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005298 int i, j;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005299
Eilon Greenstein87942b42009-02-12 08:36:49 +00005300 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
Eilon Greenstein0f008462009-02-12 08:36:18 +00005301 DP(NETIF_MSG_IFUP,
5302 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005303
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005304 if (bp->flags & TPA_ENABLE_FLAG) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005305
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005306 for_each_queue(bp, j) {
Eilon Greenstein32626232008-08-13 15:51:07 -07005307 struct bnx2x_fastpath *fp = &bp->fp[j];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005308
Eilon Greenstein32626232008-08-13 15:51:07 -07005309 for (i = 0; i < max_agg_queues; i++) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005310 fp->tpa_pool[i].skb =
5311 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5312 if (!fp->tpa_pool[i].skb) {
5313 BNX2X_ERR("Failed to allocate TPA "
5314 "skb pool for queue[%d] - "
5315 "disabling TPA on this "
5316 "queue!\n", j);
5317 bnx2x_free_tpa_pool(bp, fp, i);
5318 fp->disable_tpa = 1;
5319 break;
5320 }
FUJITA Tomonori1a983142010-04-04 01:51:03 +00005321 dma_unmap_addr_set((struct sw_rx_bd *)
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005322 &bp->fp->tpa_pool[i],
5323 mapping, 0);
5324 fp->tpa_state[i] = BNX2X_TPA_STOP;
5325 }
5326 }
5327 }
5328
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005329 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005330 struct bnx2x_fastpath *fp = &bp->fp[j];
5331
5332 fp->rx_bd_cons = 0;
5333 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005334 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005335
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005336 /* "next page" elements initialization */
5337 /* SGE ring */
5338 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5339 struct eth_rx_sge *sge;
5340
5341 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5342 sge->addr_hi =
5343 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5344 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5345 sge->addr_lo =
5346 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5347 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5348 }
5349
5350 bnx2x_init_sge_ring_bit_mask(fp);
5351
5352 /* RX BD ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005353 for (i = 1; i <= NUM_RX_RINGS; i++) {
5354 struct eth_rx_bd *rx_bd;
5355
5356 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5357 rx_bd->addr_hi =
5358 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005359 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005360 rx_bd->addr_lo =
5361 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005362 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005363 }
5364
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005365 /* CQ ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005366 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5367 struct eth_rx_cqe_next_page *nextpg;
5368
5369 nextpg = (struct eth_rx_cqe_next_page *)
5370 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5371 nextpg->addr_hi =
5372 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005373 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005374 nextpg->addr_lo =
5375 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005376 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005377 }
5378
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005379 /* Allocate SGEs and initialize the ring elements */
5380 for (i = 0, ring_prod = 0;
5381 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005382
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005383 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5384 BNX2X_ERR("was only able to allocate "
5385 "%d rx sges\n", i);
5386 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5387 /* Cleanup already allocated elements */
5388 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
Eilon Greenstein32626232008-08-13 15:51:07 -07005389 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005390 fp->disable_tpa = 1;
5391 ring_prod = 0;
5392 break;
5393 }
5394 ring_prod = NEXT_SGE_IDX(ring_prod);
5395 }
5396 fp->rx_sge_prod = ring_prod;
5397
5398 /* Allocate BDs and initialize BD ring */
Yitchak Gertner66e855f2008-08-13 15:49:05 -07005399 fp->rx_comp_cons = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005400 cqe_ring_prod = ring_prod = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005401 for (i = 0; i < bp->rx_ring_size; i++) {
5402 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5403 BNX2X_ERR("was only able to allocate "
Eilon Greensteinde832a52009-02-12 08:36:33 +00005404 "%d rx skbs on queue[%d]\n", i, j);
5405 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005406 break;
5407 }
5408 ring_prod = NEXT_RX_IDX(ring_prod);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005409 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
Ilpo Järvinen53e5e962008-07-25 21:40:45 -07005410 WARN_ON(ring_prod <= i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005411 }
5412
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005413 fp->rx_bd_prod = ring_prod;
5414 /* must not have more available CQEs than BDs */
5415 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5416 cqe_ring_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005417 fp->rx_pkt = fp->rx_calls = 0;
5418
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005419 /* Warning!
5420 * this will generate an interrupt (to the TSTORM)
5421 * must only be done after chip is initialized
5422 */
5423 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5424 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005425 if (j != 0)
5426 continue;
5427
5428 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005429 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005430 U64_LO(fp->rx_comp_mapping));
5431 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005432 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005433 U64_HI(fp->rx_comp_mapping));
5434 }
5435}
5436
5437static void bnx2x_init_tx_ring(struct bnx2x *bp)
5438{
5439 int i, j;
5440
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005441 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005442 struct bnx2x_fastpath *fp = &bp->fp[j];
5443
5444 for (i = 1; i <= NUM_TX_RINGS; i++) {
Eilon Greensteinca003922009-08-12 22:53:28 -07005445 struct eth_tx_next_bd *tx_next_bd =
5446 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005447
Eilon Greensteinca003922009-08-12 22:53:28 -07005448 tx_next_bd->addr_hi =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005449 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005450 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eilon Greensteinca003922009-08-12 22:53:28 -07005451 tx_next_bd->addr_lo =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005452 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005453 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005454 }
5455
Eilon Greensteinca003922009-08-12 22:53:28 -07005456 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5457 fp->tx_db.data.zero_fill1 = 0;
5458 fp->tx_db.data.prod = 0;
5459
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005460 fp->tx_pkt_prod = 0;
5461 fp->tx_pkt_cons = 0;
5462 fp->tx_bd_prod = 0;
5463 fp->tx_bd_cons = 0;
5464 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5465 fp->tx_pkt = 0;
5466 }
5467}
5468
5469static void bnx2x_init_sp_ring(struct bnx2x *bp)
5470{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005471 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005472
5473 spin_lock_init(&bp->spq_lock);
5474
5475 bp->spq_left = MAX_SPQ_PENDING;
5476 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005477 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5478 bp->spq_prod_bd = bp->spq;
5479 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5480
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005481 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005482 U64_LO(bp->spq_mapping));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005483 REG_WR(bp,
5484 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005485 U64_HI(bp->spq_mapping));
5486
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005487 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005488 bp->spq_prod_idx);
5489}
5490
5491static void bnx2x_init_context(struct bnx2x *bp)
5492{
5493 int i;
5494
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005495 /* Rx */
5496 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005497 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5498 struct bnx2x_fastpath *fp = &bp->fp[i];
Eilon Greensteinde832a52009-02-12 08:36:33 +00005499 u8 cl_id = fp->cl_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005500
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005501 context->ustorm_st_context.common.sb_index_numbers =
5502 BNX2X_RX_SB_INDEX_NUM;
Eilon Greenstein0626b892009-02-12 08:38:14 +00005503 context->ustorm_st_context.common.clientId = cl_id;
Eilon Greensteinca003922009-08-12 22:53:28 -07005504 context->ustorm_st_context.common.status_block_id = fp->sb_id;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005505 context->ustorm_st_context.common.flags =
Eilon Greensteinde832a52009-02-12 08:36:33 +00005506 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5507 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5508 context->ustorm_st_context.common.statistics_counter_id =
5509 cl_id;
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005510 context->ustorm_st_context.common.mc_alignment_log_size =
Eilon Greenstein0f008462009-02-12 08:36:18 +00005511 BNX2X_RX_ALIGN_SHIFT;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005512 context->ustorm_st_context.common.bd_buff_size =
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07005513 bp->rx_buf_size;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005514 context->ustorm_st_context.common.bd_page_base_hi =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005515 U64_HI(fp->rx_desc_mapping);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005516 context->ustorm_st_context.common.bd_page_base_lo =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005517 U64_LO(fp->rx_desc_mapping);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005518 if (!fp->disable_tpa) {
5519 context->ustorm_st_context.common.flags |=
Eilon Greensteinca003922009-08-12 22:53:28 -07005520 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005521 context->ustorm_st_context.common.sge_buff_size =
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005522 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5523 (u32)0xffff);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005524 context->ustorm_st_context.common.sge_page_base_hi =
5525 U64_HI(fp->rx_sge_mapping);
5526 context->ustorm_st_context.common.sge_page_base_lo =
5527 U64_LO(fp->rx_sge_mapping);
Eilon Greensteinca003922009-08-12 22:53:28 -07005528
5529 context->ustorm_st_context.common.max_sges_for_packet =
5530 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5531 context->ustorm_st_context.common.max_sges_for_packet =
5532 ((context->ustorm_st_context.common.
5533 max_sges_for_packet + PAGES_PER_SGE - 1) &
5534 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005535 }
5536
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005537 context->ustorm_ag_context.cdu_usage =
5538 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5539 CDU_REGION_NUMBER_UCM_AG,
5540 ETH_CONNECTION_TYPE);
5541
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005542 context->xstorm_ag_context.cdu_reserved =
5543 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5544 CDU_REGION_NUMBER_XCM_AG,
5545 ETH_CONNECTION_TYPE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005546 }
Eilon Greensteinca003922009-08-12 22:53:28 -07005547
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005548 /* Tx */
5549 for_each_queue(bp, i) {
Eilon Greensteinca003922009-08-12 22:53:28 -07005550 struct bnx2x_fastpath *fp = &bp->fp[i];
5551 struct eth_context *context =
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005552 bnx2x_sp(bp, context[i].eth);
Eilon Greensteinca003922009-08-12 22:53:28 -07005553
5554 context->cstorm_st_context.sb_index_number =
5555 C_SB_ETH_TX_CQ_INDEX;
5556 context->cstorm_st_context.status_block_id = fp->sb_id;
5557
5558 context->xstorm_st_context.tx_bd_page_base_hi =
5559 U64_HI(fp->tx_desc_mapping);
5560 context->xstorm_st_context.tx_bd_page_base_lo =
5561 U64_LO(fp->tx_desc_mapping);
5562 context->xstorm_st_context.statistics_data = (fp->cl_id |
5563 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5564 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005565}
5566
5567static void bnx2x_init_ind_table(struct bnx2x *bp)
5568{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005569 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005570 int i;
5571
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005572 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005573 return;
5574
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005575 DP(NETIF_MSG_IFUP,
5576 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005577 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005578 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005579 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005580 bp->fp->cl_id + (i % bp->num_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005581}
5582
Eliezer Tamir49d66772008-02-28 11:53:13 -08005583static void bnx2x_set_client_config(struct bnx2x *bp)
5584{
Eliezer Tamir49d66772008-02-28 11:53:13 -08005585 struct tstorm_eth_client_config tstorm_client = {0};
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005586 int port = BP_PORT(bp);
5587 int i;
Eliezer Tamir49d66772008-02-28 11:53:13 -08005588
Eilon Greensteine7799c52009-01-14 21:30:27 -08005589 tstorm_client.mtu = bp->dev->mtu;
Eliezer Tamir49d66772008-02-28 11:53:13 -08005590 tstorm_client.config_flags =
Eilon Greensteinde832a52009-02-12 08:36:33 +00005591 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5592 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
Eliezer Tamir49d66772008-02-28 11:53:13 -08005593#ifdef BCM_VLAN
Eilon Greenstein0c6671b2009-01-14 21:26:51 -08005594 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
Eliezer Tamir49d66772008-02-28 11:53:13 -08005595 tstorm_client.config_flags |=
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005596 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
Eliezer Tamir49d66772008-02-28 11:53:13 -08005597 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5598 }
5599#endif
Eliezer Tamir49d66772008-02-28 11:53:13 -08005600
5601 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +00005602 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5603
Eliezer Tamir49d66772008-02-28 11:53:13 -08005604 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005605 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
Eliezer Tamir49d66772008-02-28 11:53:13 -08005606 ((u32 *)&tstorm_client)[0]);
5607 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005608 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
Eliezer Tamir49d66772008-02-28 11:53:13 -08005609 ((u32 *)&tstorm_client)[1]);
5610 }
5611
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005612 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5613 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
Eliezer Tamir49d66772008-02-28 11:53:13 -08005614}
5615
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005616static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5617{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005618 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005619 int mode = bp->rx_mode;
Michael Chan37b091b2009-10-10 13:46:55 +00005620 int mask = bp->rx_mode_cl_mask;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005621 int func = BP_FUNC(bp);
Eilon Greenstein581ce432009-07-29 00:20:04 +00005622 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005623 int i;
Eilon Greenstein581ce432009-07-29 00:20:04 +00005624 /* All but management unicast packets should pass to the host as well */
5625 u32 llh_mask =
5626 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5627 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5628 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5629 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005630
Eilon Greenstein3196a882008-08-13 15:58:49 -07005631 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005632
5633 switch (mode) {
5634 case BNX2X_RX_MODE_NONE: /* no Rx */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005635 tstorm_mac_filter.ucast_drop_all = mask;
5636 tstorm_mac_filter.mcast_drop_all = mask;
5637 tstorm_mac_filter.bcast_drop_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005638 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00005639
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005640 case BNX2X_RX_MODE_NORMAL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005641 tstorm_mac_filter.bcast_accept_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005642 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00005643
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005644 case BNX2X_RX_MODE_ALLMULTI:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005645 tstorm_mac_filter.mcast_accept_all = mask;
5646 tstorm_mac_filter.bcast_accept_all = mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005647 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00005648
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005649 case BNX2X_RX_MODE_PROMISC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005650 tstorm_mac_filter.ucast_accept_all = mask;
5651 tstorm_mac_filter.mcast_accept_all = mask;
5652 tstorm_mac_filter.bcast_accept_all = mask;
Eilon Greenstein581ce432009-07-29 00:20:04 +00005653 /* pass management unicast packets as well */
5654 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005655 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00005656
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005657 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005658 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5659 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005660 }
5661
Eilon Greenstein581ce432009-07-29 00:20:04 +00005662 REG_WR(bp,
5663 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5664 llh_mask);
5665
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005666 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5667 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005668 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005669 ((u32 *)&tstorm_mac_filter)[i]);
5670
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005671/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005672 ((u32 *)&tstorm_mac_filter)[i]); */
5673 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005674
Eliezer Tamir49d66772008-02-28 11:53:13 -08005675 if (mode != BNX2X_RX_MODE_NONE)
5676 bnx2x_set_client_config(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005677}
5678
Eilon Greenstein471de712008-08-13 15:49:35 -07005679static void bnx2x_init_internal_common(struct bnx2x *bp)
5680{
5681 int i;
5682
5683 /* Zero this manually as its initialization is
5684 currently missing in the initTool */
5685 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5686 REG_WR(bp, BAR_USTRORM_INTMEM +
5687 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5688}
5689
5690static void bnx2x_init_internal_port(struct bnx2x *bp)
5691{
5692 int port = BP_PORT(bp);
5693
Eilon Greensteinca003922009-08-12 22:53:28 -07005694 REG_WR(bp,
5695 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5696 REG_WR(bp,
5697 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
Eilon Greenstein471de712008-08-13 15:49:35 -07005698 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5699 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5700}
5701
/* Per-function internal (STORM) memory initialization.
 *
 * Programs the TSTORM common config (RSS/TPA/E1HOV flags), resets the
 * per-client statistics areas in all four storms, publishes the statistics
 * query address, sets up per-queue CQ ring mappings and TPA aggregation
 * size, configures dropless flow control thresholds (E1H only) and the
 * rate-shaping/fairness (cmng) context.  The REG_WR ordering follows the
 * firmware's expectations — do not reorder.
 */
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	/* In E1H multi-function mode the outer VLAN (E1HOV) tag lives in
	   the CAM */
	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	/* The config struct is written to internal memory as one raw u32 */
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	/* Zero the per-client statistics blocks of the X/T/U storms so the
	   firmware starts counting from a clean slate */
	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	/* stats_flags is 64 bits wide — written as two consecutive u32s
	   to each storm */
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	/* Tell each storm where the host-side fw_stats DMA buffer lives
	   (64-bit address split into LO/HI halves) */
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		/* publish single- vs multi-function mode to all storms */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* base of the RX completion (CQE) page chain */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		/* threshold values below are in ring entries; presumably
		   firmware pauses RX when free entries drop under *_low and
		   resumes above *_high — TODO confirm against FW spec */
		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			/* SGE thresholds matter only when TPA is active */
			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}


			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}


	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
5916
Eilon Greenstein471de712008-08-13 15:49:35 -07005917static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5918{
5919 switch (load_code) {
5920 case FW_MSG_CODE_DRV_LOAD_COMMON:
5921 bnx2x_init_internal_common(bp);
5922 /* no break */
5923
5924 case FW_MSG_CODE_DRV_LOAD_PORT:
5925 bnx2x_init_internal_port(bp);
5926 /* no break */
5927
5928 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5929 bnx2x_init_internal_func(bp);
5930 break;
5931
5932 default:
5933 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5934 break;
5935 }
5936}
5937
/* Top-level NIC initialization after firmware load.
 *
 * Sets up each fastpath queue's identity and status block, then the default
 * status block, coalescing, RX/TX/slowpath rings, contexts, internal storm
 * memories (per @load_code level), the indirection table and statistics.
 * Interrupts are enabled only after everything above is in place.
 */
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		/* with CNIC, status block 0 is reserved for iSCSI/FCoE */
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
5992
5993/* end of nic init */
5994
5995/*
5996 * gzip service functions
5997 */
5998
5999static int bnx2x_gunzip_init(struct bnx2x *bp)
6000{
FUJITA Tomonori1a983142010-04-04 01:51:03 +00006001 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6002 &bp->gunzip_mapping, GFP_KERNEL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006003 if (bp->gunzip_buf == NULL)
6004 goto gunzip_nomem1;
6005
6006 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6007 if (bp->strm == NULL)
6008 goto gunzip_nomem2;
6009
6010 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6011 GFP_KERNEL);
6012 if (bp->strm->workspace == NULL)
6013 goto gunzip_nomem3;
6014
6015 return 0;
6016
6017gunzip_nomem3:
6018 kfree(bp->strm);
6019 bp->strm = NULL;
6020
6021gunzip_nomem2:
FUJITA Tomonori1a983142010-04-04 01:51:03 +00006022 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6023 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006024 bp->gunzip_buf = NULL;
6025
6026gunzip_nomem1:
Joe Perches7995c642010-02-17 15:01:52 +00006027 netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006028 return -ENOMEM;
6029}
6030
6031static void bnx2x_gunzip_end(struct bnx2x *bp)
6032{
6033 kfree(bp->strm->workspace);
6034
6035 kfree(bp->strm);
6036 bp->strm = NULL;
6037
6038 if (bp->gunzip_buf) {
FUJITA Tomonori1a983142010-04-04 01:51:03 +00006039 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6040 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006041 bp->gunzip_buf = NULL;
6042 }
6043}
6044
/* Decompress a gzip-wrapped firmware blob into bp->gunzip_buf.
 *
 * @zbuf: gzip stream (starts with the 10-byte gzip header)
 * @len:  length of @zbuf in bytes
 *
 * Validates the gzip magic, skips the header (including an optional
 * NUL-terminated original-file-name field), then runs zlib inflate in raw
 * deflate mode.  On success the decompressed length in 32-bit words is
 * left in bp->gunzip_outlen.  Returns 0, -EINVAL on a bad header, or a
 * zlib error code.
 */
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	/* fixed gzip header is 10 bytes */
	n = 10;

#define FNAME	0x8

	/* FLG.FNAME set: skip the NUL-terminated original file name */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	/* cast away const via typeof — zlib's next_in is not const here */
	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* negative windowBits: raw deflate data, no zlib header/trailer */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	/* firmware images must be a whole number of 32-bit words */
	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
6089
6090/* nic load/unload */
6091
6092/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006093 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006094 */
6095
6096/* send a NIG loopback debug packet */
6097static void bnx2x_lb_pckt(struct bnx2x *bp)
6098{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006099 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006100
6101 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006102 wb_write[0] = 0x55555555;
6103 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006104 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006105 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006106
6107 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006108 wb_write[0] = 0x09000000;
6109 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006110 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006111 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006112}
6113
6114/* some of the internal memories
6115 * are not directly readable from the driver
6116 * to test them we send debug packets
6117 */
/* Self-test of internal memories via NIG loopback debug packets.
 *
 * Part 1 sends a single frame and verifies the NIG byte counter and the
 * parser packet counter see it.  Part 2 sends ten frames with parser CFC
 * credits withheld, then releases the credits and verifies the parser
 * drains as expected.  BRB/PRS (and finally NIG) are reset between and
 * after the phases.  Returns 0 on success or a negative step-specific
 * code (-1..-4) identifying which check timed out.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* emulation/FPGA platforms are much slower — scale the timeouts */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
6265
/* Unmask attention interrupts of the HW blocks (0 = unmasked).
 * The commented-out SEM/MISC writes are intentionally left masked.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	/* PXP2 keeps some bits masked — different value on FPGA revs */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
6304
/* Parity attention mask table: one entry per HW block.
 * mask bit set = parity attention masked (ignored), bit clear = enabled.
 * 0xffffffff fully masks a block; 0x0 fully enables it.
 */
static const struct {
	u32 addr;	/* PRTY_MASK register of the block */
	u32 mask;	/* value written to it by enable_blocks_parity() */
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4},	/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};
6338
6339static void enable_blocks_parity(struct bnx2x *bp)
6340{
6341 int i, mask_arr_len =
6342 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
6343
6344 for (i = 0; i < mask_arr_len; i++)
6345 REG_WR(bp, bnx2x_parity_mask[i].addr,
6346 bnx2x_parity_mask[i].mask);
6347}
6348
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006349
/* Put the common (chip-wide, not per-port) HW blocks into reset by
 * clearing their bits in the two MISC reset registers.  The magic values
 * select which blocks are affected — presumably everything except the
 * blocks needed to keep the management path alive; see the reset register
 * bit definitions to confirm.
 */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
6357
/* Configure the PXP arbiter from the PCIe link parameters.
 *
 * Reads the Device Control register to extract the max payload size
 * (write order, bits 7:5) and max read request size (read order,
 * bits 14:12); the latter can be overridden by the "mrrs" module
 * parameter (bp->mrrs != -1).
 */
static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00006376
/* Enable fan-failure detection via SPIO 5 when the board requires it.
 *
 * Whether detection is needed comes from shared memory: either forced on
 * (FAN_FAILURE_ENABLED) or derived from the external PHY type of each
 * port (FAN_FAILURE_PHY_TYPE).  If required, SPIO 5 is configured as a
 * high-Z input, active-low, with its event interrupt enabled toward the
 * IGU; the resulting attention is handled in bnx2x_attn_int_deasserted0().
 */
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
			SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			/* required if ANY port carries one of these PHYs */
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
6429
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006430static int bnx2x_init_common(struct bnx2x *bp)
6431{
6432 u32 val, i;
Michael Chan37b091b2009-10-10 13:46:55 +00006433#ifdef BCM_CNIC
6434 u32 wb_write[2];
6435#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006436
6437 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6438
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00006439 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006440 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6441 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6442
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006443 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006444 if (CHIP_IS_E1H(bp))
6445 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6446
6447 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6448 msleep(30);
6449 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6450
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006451 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006452 if (CHIP_IS_E1(bp)) {
6453 /* enable HW interrupt from PXP on USDM overflow
6454 bit 16 on INT_MASK_0 */
6455 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006456 }
6457
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006458 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006459 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006460
6461#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006462 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6463 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6464 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6465 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6466 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00006467 /* make sure this value is 0 */
6468 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006469
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006470/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6471 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6472 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6473 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6474 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006475#endif
6476
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006477 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
Michael Chan37b091b2009-10-10 13:46:55 +00006478#ifdef BCM_CNIC
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006479 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6480 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6481 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006482#endif
6483
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006484 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6485 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006486
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006487 /* let the HW do it's magic ... */
6488 msleep(100);
6489 /* finish PXP init */
6490 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6491 if (val != 1) {
6492 BNX2X_ERR("PXP2 CFG failed\n");
6493 return -EBUSY;
6494 }
6495 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6496 if (val != 1) {
6497 BNX2X_ERR("PXP2 RD_INIT failed\n");
6498 return -EBUSY;
6499 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006500
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006501 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6502 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006503
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006504 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006505
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006506 /* clean the DMAE memory */
6507 bp->dmae_ready = 1;
6508 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006509
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006510 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6511 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6512 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6513 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006514
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006515 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6516 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6517 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6518 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6519
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006520 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00006521
6522#ifdef BCM_CNIC
6523 wb_write[0] = 0;
6524 wb_write[1] = 0;
6525 for (i = 0; i < 64; i++) {
6526 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6527 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6528
6529 if (CHIP_IS_E1H(bp)) {
6530 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6531 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6532 wb_write, 2);
6533 }
6534 }
6535#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006536 /* soft reset pulse */
6537 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6538 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006539
Michael Chan37b091b2009-10-10 13:46:55 +00006540#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006541 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006542#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006543
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006544 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006545 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6546 if (!CHIP_REV_IS_SLOW(bp)) {
6547 /* enable hw interrupt from doorbell Q */
6548 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6549 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006550
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006551 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6552 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08006553 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00006554#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07006555 /* set NIC mode */
6556 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00006557#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006558 if (CHIP_IS_E1H(bp))
6559 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006560
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006561 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6562 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6563 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6564 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006565
Eilon Greensteinca003922009-08-12 22:53:28 -07006566 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6567 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6568 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6569 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006570
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006571 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6572 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6573 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6574 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006575
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006576 /* sync semi rtc */
6577 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6578 0x80000000);
6579 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6580 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006581
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006582 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6583 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6584 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006585
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006586 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6587 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6588 REG_WR(bp, i, 0xc0cac01a);
6589 /* TODO: replace with something meaningful */
6590 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006591 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00006592#ifdef BCM_CNIC
6593 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6594 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6595 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6596 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6597 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6598 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6599 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6600 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6601 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6602 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6603#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006604 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006605
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006606 if (sizeof(union cdu_context) != 1024)
6607 /* we currently assume that a context is 1024 bytes */
Joe Perches7995c642010-02-17 15:01:52 +00006608 pr_alert("please adjust the size of cdu_context(%ld)\n",
6609 (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006610
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006611 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006612 val = (4 << 24) + (0 << 12) + 1024;
6613 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006614
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006615 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006616 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08006617 /* enable context validation interrupt from CFC */
6618 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6619
6620 /* set the thresholds to prevent CFC/CDU race */
6621 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006622
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006623 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6624 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006625
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006626 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006627 /* Reset PCIE errors for debug */
6628 REG_WR(bp, 0x2814, 0xffffffff);
6629 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006630
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006631 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006632 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006633 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006634 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006635
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006636 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006637 if (CHIP_IS_E1H(bp)) {
6638 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6639 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6640 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006641
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006642 if (CHIP_REV_IS_SLOW(bp))
6643 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006644
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006645 /* finish CFC init */
6646 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6647 if (val != 1) {
6648 BNX2X_ERR("CFC LL_INIT failed\n");
6649 return -EBUSY;
6650 }
6651 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6652 if (val != 1) {
6653 BNX2X_ERR("CFC AC_INIT failed\n");
6654 return -EBUSY;
6655 }
6656 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6657 if (val != 1) {
6658 BNX2X_ERR("CFC CAM_INIT failed\n");
6659 return -EBUSY;
6660 }
6661 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006662
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006663 /* read NIG statistic
6664 to see if this is our first up since powerup */
6665 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6666 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006667
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006668 /* do internal memory self test */
6669 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6670 BNX2X_ERR("internal mem self test failed\n");
6671 return -EBUSY;
6672 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006673
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006674 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
Eilon Greenstein46c6a672009-02-12 08:36:58 +00006675 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6676 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6677 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006678 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eilon Greenstein46c6a672009-02-12 08:36:58 +00006679 bp->port.need_hw_lock = 1;
6680 break;
6681
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006682 default:
6683 break;
6684 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08006685
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00006686 bnx2x_setup_fan_failure_detection(bp);
6687
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006688 /* clear PXP2 attentions */
6689 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006690
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006691 enable_blocks_attention(bp);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006692 if (CHIP_PARITY_SUPPORTED(bp))
6693 enable_blocks_parity(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006694
Yaniv Rosner6bbca912008-08-13 15:57:28 -07006695 if (!BP_NOMCP(bp)) {
6696 bnx2x_acquire_phy_lock(bp);
6697 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6698 bnx2x_release_phy_lock(bp);
6699 } else
6700 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6701
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006702 return 0;
6703}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006704
/*
 * bnx2x_init_port - per-port HW initialization (PORT stage).
 *
 * Runs the PORT0/PORT1 init stage on each HW block, derives the BRB
 * pause thresholds from MF mode / MTU / port count, configures the PBF
 * to run without PAUSE, hooks the external PHY attention source (GPIO3
 * or SPIO5, depending on the PHY type) into the AEU group and finally
 * resets the link.  The register-write sequence follows the HW bring-up
 * procedure - do not reorder.
 *
 * Always returns 0.
 */
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	/* BRB1 pause thresholds (256-byte units - see the /256 formulas
	 * below): disabled on emulation/FPGA, otherwise the low mark
	 * depends on MF mode, MTU and single/dual-port configuration */
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);


	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	/* route the external PHY attention source (GPIO3 or SPIO5,
	 * per PHY type) into the AEU enable group */
	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}
6884
/* ILT (Internal Lookup Table): 768 lines are split evenly between the
 * two functions; each function addresses its lines relative to
 * FUNC_ILT_BASE(func).
 *
 * Fix: all macro arguments are now fully parenthesized - the original
 * left 'func', 'x' and 'f' bare, so a non-trivial expression argument
 * (e.g. a conditional expression) would mis-associate.
 */
#define ILT_PER_FUNC	(768/2)
#define FUNC_ILT_BASE(func)	((func) * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)(x) >> 44)))
/* one ILT line / a [first..last] line range, encoded with the last
 * line in bits 10 and up and the first line in bits 0-9 */
#define PXP_ONE_ILT(x)		(((x) << 10) | (x))
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | (f))

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006903
6904static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6905{
6906 int reg;
6907
6908 if (CHIP_IS_E1H(bp))
6909 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6910 else /* E1 */
6911 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6912
6913 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6914}
6915
/*
 * bnx2x_init_func - per-function HW initialization (FUNCTION stage).
 *
 * Enables the MSI reconfigure capability in the HC, programs this
 * function's ILT lines (CDU context, plus the TM/QM/SRC searcher
 * tables when CNIC support is built in), runs the FUNC0+func init
 * stage for the E1H blocks and clears the PCIE error status registers.
 *
 * Always returns 0.
 */
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	/* first ILT line owned by this function */
	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	/* map the CNIC timers, QM and T1 tables into the ILT lines that
	 * follow the CDU context range */
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
7012
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007013static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7014{
7015 int i, rc = 0;
7016
7017 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
7018 BP_FUNC(bp), load_code);
7019
7020 bp->dmae_ready = 0;
7021 mutex_init(&bp->dmae_mutex);
Eilon Greenstein54016b22009-08-12 08:23:48 +00007022 rc = bnx2x_gunzip_init(bp);
7023 if (rc)
7024 return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007025
7026 switch (load_code) {
7027 case FW_MSG_CODE_DRV_LOAD_COMMON:
7028 rc = bnx2x_init_common(bp);
7029 if (rc)
7030 goto init_hw_err;
7031 /* no break */
7032
7033 case FW_MSG_CODE_DRV_LOAD_PORT:
7034 bp->dmae_ready = 1;
7035 rc = bnx2x_init_port(bp);
7036 if (rc)
7037 goto init_hw_err;
7038 /* no break */
7039
7040 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7041 bp->dmae_ready = 1;
7042 rc = bnx2x_init_func(bp);
7043 if (rc)
7044 goto init_hw_err;
7045 break;
7046
7047 default:
7048 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7049 break;
7050 }
7051
7052 if (!BP_NOMCP(bp)) {
7053 int func = BP_FUNC(bp);
7054
7055 bp->fw_drv_pulse_wr_seq =
7056 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
7057 DRV_PULSE_SEQ_MASK);
Eilon Greenstein6fe49bb2009-08-12 08:23:17 +00007058 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7059 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007060
7061 /* this needs to be done before gunzip end */
7062 bnx2x_zero_def_sb(bp);
7063 for_each_queue(bp, i)
7064 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
Michael Chan37b091b2009-10-10 13:46:55 +00007065#ifdef BCM_CNIC
7066 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7067#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007068
7069init_hw_err:
7070 bnx2x_gunzip_end(bp);
7071
7072 return rc;
7073}
7074
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007075static void bnx2x_free_mem(struct bnx2x *bp)
7076{
7077
7078#define BNX2X_PCI_FREE(x, y, size) \
7079 do { \
7080 if (x) { \
FUJITA Tomonori1a983142010-04-04 01:51:03 +00007081 dma_free_coherent(&bp->pdev->dev, size, x, y); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007082 x = NULL; \
7083 y = 0; \
7084 } \
7085 } while (0)
7086
7087#define BNX2X_FREE(x) \
7088 do { \
7089 if (x) { \
7090 vfree(x); \
7091 x = NULL; \
7092 } \
7093 } while (0)
7094
7095 int i;
7096
7097 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007098 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007099 for_each_queue(bp, i) {
7100
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007101 /* status blocks */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007102 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7103 bnx2x_fp(bp, i, status_blk_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07007104 sizeof(struct host_status_block));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007105 }
7106 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007107 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007108
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007109 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007110 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7111 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7112 bnx2x_fp(bp, i, rx_desc_mapping),
7113 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7114
7115 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7116 bnx2x_fp(bp, i, rx_comp_mapping),
7117 sizeof(struct eth_fast_path_rx_cqe) *
7118 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007119
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07007120 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07007121 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07007122 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7123 bnx2x_fp(bp, i, rx_sge_mapping),
7124 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7125 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007126 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007127 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007128
7129 /* fastpath tx rings: tx_buf tx_desc */
7130 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7131 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7132 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07007133 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007134 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007135 /* end of fastpath */
7136
7137 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007138 sizeof(struct host_def_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007139
7140 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007141 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007142
Michael Chan37b091b2009-10-10 13:46:55 +00007143#ifdef BCM_CNIC
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007144 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7145 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7146 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7147 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
Michael Chan37b091b2009-10-10 13:46:55 +00007148 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7149 sizeof(struct host_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007150#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07007151 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007152
7153#undef BNX2X_PCI_FREE
7154#undef BNX2X_KFREE
7155}
7156
/*
 * Allocate all host memory the driver needs: per-queue status blocks,
 * Rx/Tx/SGE rings, the default status block, slowpath buffer, CNIC
 * tables (when BCM_CNIC) and the slow path (SPQ) ring.
 *
 * Both helper macros jump to alloc_mem_err on failure, where
 * bnx2x_free_mem() releases everything allocated so far (it tolerates
 * NULL pointers for the pieces never reached).
 *
 * Returns 0 on success, -ENOMEM if any allocation fails.
 */
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

/* DMA-coherent allocation, zeroed; mapping returned through y */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* plain virtual (non-DMA) allocation, zeroed */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	  (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	/* unwind: free every piece allocated before the failure */
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
7262
7263static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7264{
7265 int i;
7266
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007267 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007268 struct bnx2x_fastpath *fp = &bp->fp[i];
7269
7270 u16 bd_cons = fp->tx_bd_cons;
7271 u16 sw_prod = fp->tx_pkt_prod;
7272 u16 sw_cons = fp->tx_pkt_cons;
7273
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007274 while (sw_cons != sw_prod) {
7275 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7276 sw_cons++;
7277 }
7278 }
7279}
7280
7281static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7282{
7283 int i, j;
7284
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007285 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007286 struct bnx2x_fastpath *fp = &bp->fp[j];
7287
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007288 for (i = 0; i < NUM_RX_BD; i++) {
7289 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7290 struct sk_buff *skb = rx_buf->skb;
7291
7292 if (skb == NULL)
7293 continue;
7294
FUJITA Tomonori1a983142010-04-04 01:51:03 +00007295 dma_unmap_single(&bp->pdev->dev,
7296 dma_unmap_addr(rx_buf, mapping),
7297 bp->rx_buf_size, DMA_FROM_DEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007298
7299 rx_buf->skb = NULL;
7300 dev_kfree_skb(skb);
7301 }
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07007302 if (!fp->disable_tpa)
Eilon Greenstein32626232008-08-13 15:51:07 -07007303 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7304 ETH_MAX_AGGREGATION_QUEUES_E1 :
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07007305 ETH_MAX_AGGREGATION_QUEUES_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007306 }
7307}
7308
/* Free all driver-held skbs: Tx rings first, then Rx rings. */
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
7314
/*
 * Free all requested MSI-X vectors: the slowpath vector (entry 0) and
 * one vector per fastpath queue.  When BCM_CNIC is built in, entry 1
 * belongs to CNIC, so the fastpath vectors start at offset 2.
 *
 * NOTE(review): the CNIC vector itself is not freed here — presumably
 * it is owned/released elsewhere; confirm against the CNIC code.
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}
7334
/*
 * Tear down the interrupt mode currently in use (MSI-X, MSI or INTx).
 *
 * @disable_only: when true, only disable MSI-X/MSI on the PCI device
 *                without calling free_irq() — used on paths where the
 *                IRQs were never successfully requested.
 */
static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
	if (bp->flags & USING_MSIX_FLAG) {
		if (!disable_only)
			bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		if (!disable_only)
			free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else if (!disable_only)
		free_irq(bp->pdev->irq, bp->dev);
}
7352
/*
 * Populate the MSI-X entry table and enable MSI-X on the PCI device.
 *
 * Layout: entry 0 = slowpath, entry 1 = CNIC (only when BCM_CNIC),
 * remaining entries = one per fastpath queue, with IGU vector numbers
 * derived from the function's logical ID (BP_L_ID).
 *
 * Returns 0 and sets USING_MSIX_FLAG on success; returns the
 * pci_enable_msix() error code on failure (caller falls back to
 * MSI/INTx).
 */
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
7385
/*
 * Request all MSI-X vectors previously enabled by bnx2x_enable_msix():
 * the slowpath vector first, then one vector per fastpath queue (named
 * "<dev>-fp-<n>").  On a mid-loop failure all already-requested vectors
 * are released via bnx2x_free_msix_irqs().
 *
 * Returns 0 on success, -EBUSY on any request_irq() failure.
 */
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	/* entry 1 is reserved for CNIC; fastpath vectors start at 2 */
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
7424
Eilon Greenstein8badd272009-02-12 08:36:15 +00007425static int bnx2x_enable_msi(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007426{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007427 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007428
Eilon Greenstein8badd272009-02-12 08:36:15 +00007429 rc = pci_enable_msi(bp->pdev);
7430 if (rc) {
7431 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7432 return -1;
7433 }
7434 bp->flags |= USING_MSI_FLAG;
7435
7436 return 0;
7437}
7438
7439static int bnx2x_req_irq(struct bnx2x *bp)
7440{
7441 unsigned long flags;
7442 int rc;
7443
7444 if (bp->flags & USING_MSI_FLAG)
7445 flags = 0;
7446 else
7447 flags = IRQF_SHARED;
7448
7449 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007450 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007451 if (!rc)
7452 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7453
7454 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007455}
7456
/* Enable NAPI polling on every fastpath queue. */
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}
7464
/* Disable NAPI polling on every fastpath queue. */
static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}
7472
/*
 * Re-enable network activity: drop the interrupt semaphore and, only
 * when this call brings it to zero (atomic_dec_and_test), turn NAPI
 * and device interrupts back on and wake the Tx queues.
 */
static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			/* only wake Tx once the device is fully OPEN */
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}
7489
/*
 * Quiesce network activity: synchronously disable interrupts
 * (optionally at the HW level via @disable_hw), stop NAPI polling,
 * then stop the Tx queues.
 */
static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}
7496
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007497/*
7498 * Init service functions
7499 */
7500
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * The CAM configuration is built in the slowpath buffer and posted to
 * the chip with a SET_MAC ramrod; completion is asynchronous — the
 * caller is expected to wait (see bnx2x_wait_ramrod() users).
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
7572
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * Builds a single-entry E1H CAM configuration in the slowpath buffer
 * and posts it with a SET_MAC ramrod; completion is asynchronous.
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		/* clearing: mark the entry for invalidation */
		config->config_table[0].flags =
			MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
7620
/*
 * Wait (up to ~5 seconds) for *state_p to reach @state, as updated by
 * ramrod completion handling.  In @poll mode the Rx completion rings
 * are serviced directly (default queue, plus queue @idx when nonzero)
 * instead of relying on interrupts.
 *
 * Returns 0 on success, -EIO if the driver paniced meanwhile,
 * -EBUSY on timeout.
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
7665
/*
 * Program (or clear) the netdev MAC in the E1H CAM and wait for the
 * SET_MAC ramrod to complete.  set_mac_pending is bumped before the
 * post (with a write barrier) and decremented by the completion path.
 */
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}
7677
/*
 * Program (or clear) the netdev MAC (plus broadcast) in the E1 CAM and
 * wait for the SET_MAC ramrod to complete.  CAM offset is 0 for port 0
 * and 32 for port 1 (see the CAM allocation comment in the E1 helper).
 */
static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}
7690
#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 on success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
				  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
				  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif
7728
/*
 * Bring up the leading (default) connection: ack/reset the IGU status
 * block, post the PORT_SETUP ramrod, then wait until bp->state reaches
 * BNX2X_STATE_OPEN.  Returns bnx2x_wait_ramrod()'s result.
 */
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}
7744
/*
 * Bring up a non-leading client connection @index: ack/reset its IGU
 * status block, post the CLIENT_SETUP ramrod, then wait for the queue
 * state to become BNX2X_FP_STATE_OPEN.
 */
static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}
7761
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007762static int bnx2x_poll(struct napi_struct *napi, int budget);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007763
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007764static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007765{
Eilon Greensteinca003922009-08-12 22:53:28 -07007766
7767 switch (bp->multi_mode) {
7768 case ETH_RSS_MODE_DISABLED:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007769 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007770 break;
7771
7772 case ETH_RSS_MODE_REGULAR:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007773 if (num_queues)
7774 bp->num_queues = min_t(u32, num_queues,
7775 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07007776 else
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007777 bp->num_queues = min_t(u32, num_online_cpus(),
7778 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07007779 break;
7780
7781
7782 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007783 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007784 break;
7785 }
Eilon Greensteinca003922009-08-12 22:53:28 -07007786}
7787
/*
 * Decide how many queues to run based on the int_mode module parameter
 * and try to enable the matching interrupt mode.  INTx/MSI force a
 * single queue; MSI-X asks bnx2x_set_num_queues_msix() and falls back
 * to one queue if MSI-X cannot be enabled.
 *
 * Returns 0 on success; a nonzero rc means MSI-X failed and the caller
 * should fall back to MSI/INTx (num_queues is already reset to 1).
 */
static int bnx2x_set_num_queues(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues_msix(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc)
			/* failed to enable MSI-X */
			bp->num_queues = 1;
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_queues;
	return rc;
}
7820
Michael Chan993ac7b2009-10-10 13:46:56 +00007821#ifdef BCM_CNIC
7822static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7823static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7824#endif
Eilon Greenstein8badd272009-02-12 08:36:15 +00007825
7826/* must be called with rtnl_lock */
7827static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7828{
7829 u32 load_code;
Eilon Greensteinca003922009-08-12 22:53:28 -07007830 int i, rc;
7831
Eilon Greenstein8badd272009-02-12 08:36:15 +00007832#ifdef BNX2X_STOP_ON_ERROR
Eilon Greenstein8badd272009-02-12 08:36:15 +00007833 if (unlikely(bp->panic))
7834 return -EPERM;
7835#endif
7836
7837 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7838
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007839 rc = bnx2x_set_num_queues(bp);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007840
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007841 if (bnx2x_alloc_mem(bp)) {
7842 bnx2x_free_irq(bp, true);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007843 return -ENOMEM;
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007844 }
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007845
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007846 for_each_queue(bp, i)
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007847 bnx2x_fp(bp, i, disable_tpa) =
7848 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7849
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007850 for_each_queue(bp, i)
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007851 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7852 bnx2x_poll, 128);
7853
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007854 bnx2x_napi_enable(bp);
7855
7856 if (bp->flags & USING_MSIX_FLAG) {
7857 rc = bnx2x_req_msix_irqs(bp);
7858 if (rc) {
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007859 bnx2x_free_irq(bp, true);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007860 goto load_error1;
7861 }
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007862 } else {
Eilon Greensteinca003922009-08-12 22:53:28 -07007863 /* Fall to INTx if failed to enable MSI-X due to lack of
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007864 memory (in bnx2x_set_num_queues()) */
Eilon Greenstein8badd272009-02-12 08:36:15 +00007865 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7866 bnx2x_enable_msi(bp);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007867 bnx2x_ack_int(bp);
7868 rc = bnx2x_req_irq(bp);
7869 if (rc) {
7870 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007871 bnx2x_free_irq(bp, true);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007872 goto load_error1;
7873 }
Eilon Greenstein8badd272009-02-12 08:36:15 +00007874 if (bp->flags & USING_MSI_FLAG) {
7875 bp->dev->irq = bp->pdev->irq;
Joe Perches7995c642010-02-17 15:01:52 +00007876 netdev_info(bp->dev, "using MSI IRQ %d\n",
7877 bp->pdev->irq);
Eilon Greenstein8badd272009-02-12 08:36:15 +00007878 }
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007879 }
7880
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007881 /* Send LOAD_REQUEST command to MCP
7882 Returns the type of LOAD command:
7883 if it is the first port to be initialized
7884 common blocks should be initialized, otherwise - not
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007885 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007886 if (!BP_NOMCP(bp)) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007887 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7888 if (!load_code) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007889 BNX2X_ERR("MCP response failure, aborting\n");
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007890 rc = -EBUSY;
7891 goto load_error2;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007892 }
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007893 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7894 rc = -EBUSY; /* other port in diagnostic mode */
7895 goto load_error2;
7896 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007897
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007898 } else {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007899 int port = BP_PORT(bp);
7900
Eilon Greensteinf5372252009-02-12 08:38:30 +00007901 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007902 load_count[0], load_count[1], load_count[2]);
7903 load_count[0]++;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007904 load_count[1 + port]++;
Eilon Greensteinf5372252009-02-12 08:38:30 +00007905 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007906 load_count[0], load_count[1], load_count[2]);
7907 if (load_count[0] == 1)
7908 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007909 else if (load_count[1 + port] == 1)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007910 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7911 else
7912 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007913 }
7914
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007915 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7916 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7917 bp->port.pmf = 1;
7918 else
7919 bp->port.pmf = 0;
7920 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7921
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007922 /* Initialize HW */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007923 rc = bnx2x_init_hw(bp, load_code);
7924 if (rc) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007925 BNX2X_ERR("HW init failed, aborting\n");
Vladislav Zolotarovf1e1a192010-02-17 02:03:33 +00007926 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7927 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7928 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007929 goto load_error2;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007930 }
7931
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007932 /* Setup NIC internals and enable interrupts */
Eilon Greenstein471de712008-08-13 15:49:35 -07007933 bnx2x_nic_init(bp, load_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007934
Eilon Greenstein2691d512009-08-12 08:22:08 +00007935 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7936 (bp->common.shmem2_base))
7937 SHMEM2_WR(bp, dcc_support,
7938 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7939 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7940
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007941 /* Send LOAD_DONE command to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007942 if (!BP_NOMCP(bp)) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007943 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7944 if (!load_code) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007945 BNX2X_ERR("MCP response failure, aborting\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007946 rc = -EBUSY;
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007947 goto load_error3;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007948 }
7949 }
7950
7951 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7952
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007953 rc = bnx2x_setup_leading(bp);
7954 if (rc) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007955 BNX2X_ERR("Setup leading failed!\n");
Eilon Greensteine3553b22009-08-12 08:23:31 +00007956#ifndef BNX2X_STOP_ON_ERROR
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007957 goto load_error3;
Eilon Greensteine3553b22009-08-12 08:23:31 +00007958#else
7959 bp->panic = 1;
7960 return -EBUSY;
7961#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007962 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007963
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007964 if (CHIP_IS_E1H(bp))
7965 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
Eilon Greensteinf5372252009-02-12 08:38:30 +00007966 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07007967 bp->flags |= MF_FUNC_DIS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007968 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007969
Eilon Greensteinca003922009-08-12 22:53:28 -07007970 if (bp->state == BNX2X_STATE_OPEN) {
Michael Chan37b091b2009-10-10 13:46:55 +00007971#ifdef BCM_CNIC
7972 /* Enable Timer scan */
7973 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7974#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007975 for_each_nondefault_queue(bp, i) {
7976 rc = bnx2x_setup_multi(bp, i);
7977 if (rc)
Michael Chan37b091b2009-10-10 13:46:55 +00007978#ifdef BCM_CNIC
7979 goto load_error4;
7980#else
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007981 goto load_error3;
Michael Chan37b091b2009-10-10 13:46:55 +00007982#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007983 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007984
Eilon Greensteinca003922009-08-12 22:53:28 -07007985 if (CHIP_IS_E1(bp))
Michael Chane665bfd2009-10-10 13:46:54 +00007986 bnx2x_set_eth_mac_addr_e1(bp, 1);
Eilon Greensteinca003922009-08-12 22:53:28 -07007987 else
Michael Chane665bfd2009-10-10 13:46:54 +00007988 bnx2x_set_eth_mac_addr_e1h(bp, 1);
Michael Chan993ac7b2009-10-10 13:46:56 +00007989#ifdef BCM_CNIC
7990 /* Set iSCSI L2 MAC */
7991 mutex_lock(&bp->cnic_mutex);
7992 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7993 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7994 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
Michael Chan4a6e47a2009-12-25 17:13:07 -08007995 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
7996 CNIC_SB_ID(bp));
Michael Chan993ac7b2009-10-10 13:46:56 +00007997 }
7998 mutex_unlock(&bp->cnic_mutex);
7999#endif
Eilon Greensteinca003922009-08-12 22:53:28 -07008000 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008001
8002 if (bp->port.pmf)
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00008003 bnx2x_initial_phy_init(bp, load_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008004
8005 /* Start fast path */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008006 switch (load_mode) {
8007 case LOAD_NORMAL:
Eilon Greensteinca003922009-08-12 22:53:28 -07008008 if (bp->state == BNX2X_STATE_OPEN) {
8009 /* Tx queue should be only reenabled */
8010 netif_tx_wake_all_queues(bp->dev);
8011 }
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00008012 /* Initialize the receive filter. */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008013 bnx2x_set_rx_mode(bp->dev);
8014 break;
8015
8016 case LOAD_OPEN:
Eilon Greenstein555f6c72009-02-12 08:36:11 +00008017 netif_tx_start_all_queues(bp->dev);
Eilon Greensteinca003922009-08-12 22:53:28 -07008018 if (bp->state != BNX2X_STATE_OPEN)
8019 netif_tx_disable(bp->dev);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00008020 /* Initialize the receive filter. */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008021 bnx2x_set_rx_mode(bp->dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008022 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008023
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008024 case LOAD_DIAG:
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00008025 /* Initialize the receive filter. */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008026 bnx2x_set_rx_mode(bp->dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008027 bp->state = BNX2X_STATE_DIAG;
8028 break;
8029
8030 default:
8031 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008032 }
8033
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008034 if (!bp->port.pmf)
8035 bnx2x__link_status_update(bp);
8036
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008037 /* start the timer */
8038 mod_timer(&bp->timer, jiffies + bp->current_interval);
8039
Michael Chan993ac7b2009-10-10 13:46:56 +00008040#ifdef BCM_CNIC
8041 bnx2x_setup_cnic_irq_info(bp);
8042 if (bp->state == BNX2X_STATE_OPEN)
8043 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8044#endif
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00008045 bnx2x_inc_load_cnt(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008046
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008047 return 0;
8048
Michael Chan37b091b2009-10-10 13:46:55 +00008049#ifdef BCM_CNIC
8050load_error4:
8051 /* Disable Timer scan */
8052 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8053#endif
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00008054load_error3:
8055 bnx2x_int_disable_sync(bp, 1);
8056 if (!BP_NOMCP(bp)) {
8057 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8058 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8059 }
8060 bp->port.pmf = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07008061 /* Free SKBs, SGEs, TPA pool and driver internals */
8062 bnx2x_free_skbs(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00008063 for_each_queue(bp, i)
Eilon Greenstein3196a882008-08-13 15:58:49 -07008064 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00008065load_error2:
Yitchak Gertnerd1014632008-08-25 15:25:45 -07008066 /* Release IRQs */
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00008067 bnx2x_free_irq(bp, false);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00008068load_error1:
8069 bnx2x_napi_disable(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00008070 for_each_queue(bp, i)
Eilon Greenstein7cde1c82009-01-22 06:01:25 +00008071 netif_napi_del(&bnx2x_fp(bp, i, napi));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008072 bnx2x_free_mem(bp);
8073
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008074 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008075}
8076
/* Tear down one non-default fastpath connection.
 *
 * Posts a HALT ramrod for queue @index and waits for the fastpath to
 * reach HALTED, then posts a CFC_DEL ramrod and waits for CLOSED.
 *
 * Returns 0 on success, or the non-zero bnx2x_wait_ramrod() status
 * (timeout) — in which case the CFC entry is never deleted.
 */
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}
8100
/* Tear down the leading (default) connection.
 *
 * Halts fastpath 0 via a HALT ramrod, then posts a PORT_DEL ramrod and
 * polls the default status block producer for its completion (up to
 * ~500ms).  A PORT_DEL timeout is tolerated — the chip is about to be
 * reset anyway — but is reported via the return value.
 *
 * Returns 0 on success, non-zero on HALT or PORT_DEL timeout.
 */
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	/* Snapshot the default SB producer so we can detect the
	 * PORT_DEL completion as a producer change below. */
	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
8150
/* Per-function HW reset: mask this function's IGU edge registers,
 * (with CNIC) stop the timer scan and wait for it to drain, then
 * clear the function's ILT entries.
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
8179
/* Per-port HW reset: mask the port's NIG interrupts, stop packet
 * reception into the BRB for this port, mask its AEU attentions,
 * then verify (after a 100ms grace period) that the BRB drained.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
8205
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008206static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8207{
8208 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
8209 BP_FUNC(bp), reset_code);
8210
8211 switch (reset_code) {
8212 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8213 bnx2x_reset_port(bp);
8214 bnx2x_reset_func(bp);
8215 bnx2x_reset_common(bp);
8216 break;
8217
8218 case FW_MSG_CODE_DRV_UNLOAD_PORT:
8219 bnx2x_reset_port(bp);
8220 bnx2x_reset_func(bp);
8221 break;
8222
8223 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8224 bnx2x_reset_func(bp);
8225 break;
8226
8227 default:
8228 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8229 break;
8230 }
8231}
8232
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00008233static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008234{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008235 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008236 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008237 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008238
Eilon Greenstein555f6c72009-02-12 08:36:11 +00008239 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00008240 for_each_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08008241 struct bnx2x_fastpath *fp = &bp->fp[i];
8242
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008243 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08008244 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008245
Eilon Greenstein7961f792009-03-02 07:59:31 +00008246 bnx2x_tx_int(fp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008247 if (!cnt) {
8248 BNX2X_ERR("timeout waiting for queue[%d]\n",
8249 i);
8250#ifdef BNX2X_STOP_ON_ERROR
8251 bnx2x_panic();
8252 return -EBUSY;
8253#else
8254 break;
8255#endif
8256 }
8257 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008258 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008259 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08008260 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008261 /* Give HW time to discard old tx messages */
8262 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008263
Yitchak Gertner65abd742008-08-25 15:26:24 -07008264 if (CHIP_IS_E1(bp)) {
8265 struct mac_configuration_cmd *config =
8266 bnx2x_sp(bp, mcast_config);
8267
Michael Chane665bfd2009-10-10 13:46:54 +00008268 bnx2x_set_eth_mac_addr_e1(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07008269
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08008270 for (i = 0; i < config->hdr.length; i++)
Yitchak Gertner65abd742008-08-25 15:26:24 -07008271 CAM_INVALIDATE(config->config_table[i]);
8272
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08008273 config->hdr.length = i;
Yitchak Gertner65abd742008-08-25 15:26:24 -07008274 if (CHIP_REV_IS_SLOW(bp))
8275 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8276 else
8277 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
Eilon Greenstein0626b892009-02-12 08:38:14 +00008278 config->hdr.client_id = bp->fp->cl_id;
Yitchak Gertner65abd742008-08-25 15:26:24 -07008279 config->hdr.reserved1 = 0;
8280
Michael Chane665bfd2009-10-10 13:46:54 +00008281 bp->set_mac_pending++;
8282 smp_wmb();
8283
Yitchak Gertner65abd742008-08-25 15:26:24 -07008284 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8285 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8286 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8287
8288 } else { /* E1H */
8289 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8290
Michael Chane665bfd2009-10-10 13:46:54 +00008291 bnx2x_set_eth_mac_addr_e1h(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07008292
8293 for (i = 0; i < MC_HASH_SIZE; i++)
8294 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00008295
8296 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07008297 }
Michael Chan993ac7b2009-10-10 13:46:56 +00008298#ifdef BCM_CNIC
8299 /* Clear iSCSI L2 MAC */
8300 mutex_lock(&bp->cnic_mutex);
8301 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8302 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8303 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8304 }
8305 mutex_unlock(&bp->cnic_mutex);
8306#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07008307
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008308 if (unload_mode == UNLOAD_NORMAL)
8309 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08008310
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00008311 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008312 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008313
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00008314 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008315 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008316 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008317 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008318 /* The mac address is written to entries 1-4 to
8319 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008320 u8 entry = (BP_E1HVN(bp) + 1)*8;
8321
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008322 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07008323 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008324
8325 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8326 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07008327 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008328
8329 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08008330
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008331 } else
8332 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8333
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008334 /* Close multi and leading connections
8335 Completions for ramrods are collected in a synchronous way */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008336 for_each_nondefault_queue(bp, i)
8337 if (bnx2x_stop_multi(bp, i))
Eliezer Tamir228241e2008-02-28 11:56:57 -08008338 goto unload_error;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008339
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008340 rc = bnx2x_stop_leading(bp);
8341 if (rc) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008342 BNX2X_ERR("Stop leading failed!\n");
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008343#ifdef BNX2X_STOP_ON_ERROR
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008344 return -EBUSY;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008345#else
8346 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008347#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08008348 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008349
Eliezer Tamir228241e2008-02-28 11:56:57 -08008350unload_error:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008351 if (!BP_NOMCP(bp))
Eliezer Tamir228241e2008-02-28 11:56:57 -08008352 reset_code = bnx2x_fw_command(bp, reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008353 else {
Eilon Greensteinf5372252009-02-12 08:38:30 +00008354 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008355 load_count[0], load_count[1], load_count[2]);
8356 load_count[0]--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008357 load_count[1 + port]--;
Eilon Greensteinf5372252009-02-12 08:38:30 +00008358 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008359 load_count[0], load_count[1], load_count[2]);
8360 if (load_count[0] == 0)
8361 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008362 else if (load_count[1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008363 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8364 else
8365 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8366 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008367
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008368 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8369 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8370 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008371
8372 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08008373 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008374
8375 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008376 if (!BP_NOMCP(bp))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008377 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
Eilon Greenstein356e2382009-02-12 08:38:32 +00008378
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00008379}
8380
8381static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8382{
8383 u32 val;
8384
8385 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8386
8387 if (CHIP_IS_E1(bp)) {
8388 int port = BP_PORT(bp);
8389 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8390 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8391
8392 val = REG_RD(bp, addr);
8393 val &= ~(0x300);
8394 REG_WR(bp, addr, val);
8395 } else if (CHIP_IS_E1H(bp)) {
8396 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8397 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8398 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8399 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8400 }
8401}
8402
/* must be called with rtnl_lock */
/* Bring the NIC down.
 *
 * Order matters: notify CNIC, switch to "drop all" rx mode, stop HW
 * interrupts/NAPI/Tx, stop the timer and statistics, release IRQs,
 * then clean the chip (skipped for UNLOAD_RECOVERY) and free all
 * driver resources.  If this was the last driver instance and no
 * parity attention / process-kill is pending, the "close the gate"
 * protection is disabled on the way out.
 *
 * Returns -EINVAL if the interface is already closed, 0 otherwise.
 */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;

	if (bp->state == BNX2X_STATE_CLOSED) {
		/* Interface has been removed - nothing to recover */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
		/* NOTE(review): presumably orders the recovery-state and
		 * leadership updates before other observers - confirm */
		smp_wmb();

		return -EINVAL;
	}

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	/* Report an always-alive driver pulse to the MCP */
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);

	/* Reset MCP mail box sequence if there is on going recovery */
	if (unload_mode == UNLOAD_RECOVERY)
		bp->fw_seq = 0;

	return 0;
}
8469
/* Close gates #2, #3 and #4: */
/* Open (@close == false) or close the HW "gates" used by the recovery
 * flow:
 *   #4 - PXP doorbell discard       (not on E1)
 *   #2 - PXP internal-write discard (not on E1)
 *   #3 - HC config bit 0 for this port
 * NOTE(review): for gate #3 the bit is SET when opening and CLEARED
 * when closing - opposite polarity to gates #2/#4; confirm against the
 * HC_REG_CONFIG register documentation.
 */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	mmiowb(); /* make sure the writes are posted before returning */
}
8496
8497#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8498
8499static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8500{
8501 /* Do some magic... */
8502 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8503 *magic_val = val & SHARED_MF_CLP_MAGIC;
8504 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8505}
8506
8507/* Restore the value of the `magic' bit.
8508 *
8509 * @param pdev Device handle.
8510 * @param magic_val Old value of the `magic' bit.
8511 */
8512static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8513{
8514 /* Restore the `magic' bit value... */
8515 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
8516 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
8517 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
8518 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8519 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8520 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8521}
8522
/* Prepares for MCP reset: takes care of CLP configurations.
 *
 * @param bp
 * @param magic_val Old value of 'magic' bit.
 *
 * NOTE(review): *magic_val is only written on non-E1 chips; the
 * matching bnx2x_reset_mcp_comp() likewise only restores it on
 * non-E1, so the pairing is consistent.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}
8547
#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT 100 /* 100 ms */

/* Sleep for one MCP polling interval: MCP_ONE_TIMEOUT ms normally,
 * ten times that on slow (emulation/FPGA) chip revisions.
 */
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	msleep(CHIP_REV_IS_SLOW(bp) ? MCP_ONE_TIMEOUT * 10 :
				      MCP_ONE_TIMEOUT);
}
8565
/* Wait for the MCP to come back up after a reset.
 *
 * Polls the shmem validity map (port 0) for up to MCP_TIMEOUT until
 * both the DEV_INFO and MB validity flags are set, then restores the
 * CLP `magic' bit saved by bnx2x_reset_mcp_prep() (non-E1 only).
 *
 * Returns 0 when the shared memory signature becomes valid, -ENOTTY
 * if no shmem offset is available or the signature never appears.
 */
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	/* Give the MCP a head start before polling */
	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: its best to check validity map of last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
8617
/* Clears the PXP2 read-init and RQ done indications.  Called from
 * bnx2x_process_kill() both before and after the chip reset so the PXP2
 * block goes through its init sequence again.  E1 needs no such step.
 */
static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
		/* make sure the writes reach the chip before proceeding */
		mmiowb();
	}
}
8627
/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *        one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 *
 * Writing a bit to the *_CLEAR register puts the corresponding block into
 * reset; writing it to the *_SET register takes it back out (same
 * convention as used in bnx2x_undi_unload()).
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	/* Blocks in reset register 1 that must stay out of reset */
	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	/* Blocks in reset register 2 that must stay out of reset */
	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	/* E1 has fewer valid bits in reset register 2 */
	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	/* Put everything except the excluded blocks into reset */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	/* Take all blocks back out of reset */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}
8676
/* Performs the "process kill" chip recovery: drains outstanding PCIe/GRC
 * traffic, closes gates #2-#4, resets the chip (including the MCP) and
 * waits for the MCP to come back up.
 *
 * @param bp
 * @return 0 on success, -EAGAIN if the Tetris buffer never drained or the
 *         MCP failed to recover
 */
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;


	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		/* Magic values below are the "fully drained/idle" readings
		 * for these counters -- presumably from the HW spec; verify
		 * against the PXP2 register documentation. */
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	/* NOTE(review): if the break above fires on the very last iteration
	 * (cnt already decremented to 0), this still reports a timeout --
	 * benign off-by-one, but worth confirming intent. */
	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
		   " are still"
		   " outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
		   " port_is_idle_0=0x%08x,"
		   " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
		   sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
		   pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare to chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}
8758
8759static int bnx2x_leader_reset(struct bnx2x *bp)
8760{
8761 int rc = 0;
8762 /* Try to recover after the failure */
8763 if (bnx2x_process_kill(bp)) {
8764 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
8765 bp->dev->name);
8766 rc = -EAGAIN;
8767 goto exit_leader_reset;
8768 }
8769
8770 /* Clear "reset is in progress" bit and update the driver state */
8771 bnx2x_set_reset_done(bp);
8772 bp->recovery_state = BNX2X_RECOVERY_DONE;
8773
8774exit_leader_reset:
8775 bp->is_leader = 0;
8776 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8777 smp_wmb();
8778 return rc;
8779}
8780
8781static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8782
/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensure that it
 * will never be called when netif_running(bp->dev) is false.
 *
 * State machine for parity-error recovery.  The while(1)/switch shape lets
 * a state transition (via `break') immediately re-evaluate the new state;
 * every terminal path `return's out of the loop instead.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock; whoever gets it
			 * will drive the chip reset for all functions. */
			if (bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 * update values are seen on other CPUs
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down: re-arm this work item and
					 * come back in 100ms.
					 */
					schedule_delayed_work(&bp->reset_task,
							      HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR"%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all function
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								      PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					/* Reset still in progress; poll again
					 * in 100ms. */
					schedule_delayed_work(&bp->reset_task,
							      HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}
8886
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
 * scheduled on a general queue in order to prevent a dead lock.
 *
 * Work handler for bp->reset_task: either drives parity recovery (when
 * recovery_state says one is pending) or performs a plain unload/load
 * cycle.  Everything runs under rtnl_lock.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	/* Device may have been detached/closed meanwhile */
	if (!netif_running(bp->dev))
		goto reset_task_exit;

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
		bnx2x_parity_recover(bp);
	else {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
	}

reset_task_exit:
	rtnl_unlock();
}
8916
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008917/* end of nic load/unload */
8918
8919/* ethtool_ops */
8920
8921/*
8922 * Init service functions
8923 */
8924
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00008925static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8926{
8927 switch (func) {
8928 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8929 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8930 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8931 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8932 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8933 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8934 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8935 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8936 default:
8937 BNX2X_ERR("Unsupported function index: %d\n", func);
8938 return (u32)(-1);
8939 }
8940}
8941
/* Disables interrupts on an E1H chip while pretending to be function 0,
 * then restores the original function's pretend setting.  Each pretend
 * write is followed by a read-back of the same register to flush the GRC
 * transaction inside the chip.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
8974
/* Disables chip interrupts, taking the E1H pretend-register path when the
 * chip requires it and the plain path otherwise.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp)) {
		bnx2x_int_disable(bp);
		return;
	}

	bnx2x_undi_int_disable_e1h(bp, func);
}
8982
/* Detects a pre-boot UNDI driver left active by the boot firmware and, if
 * found, unloads it: asks the MCP to unload on both ports if needed,
 * disables interrupts, closes input traffic, resets the device and
 * restores this function's firmware sequence number.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
9081
/* Reads the chip-wide (port-independent) hardware information: chip id,
 * port count, flash size, shmem bases, HW/feature config, bootcode version,
 * WoL capability and board part number.  Sets NO_MCP_FLAG and returns early
 * when no valid shmem base is found.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* NOTE(review): 0x2874 looks like a port-mode strap register and
	 * 0x55 its single-port pattern -- confirm against the register
	 * spec. */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	/* A shmem base outside [0xA0000, 0xC0000) means the MCP never set
	 * one up -- treat the MCP as absent. */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	/* Bootcode version lives in the upper 24 bits of bc_rev */
	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	/* 16-byte part number is read as four consecutive u32s */
	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
}
9179
/* Builds bp->port.supported -- the set of link modes this port can offer --
 * from the switch configuration (1G SerDes vs 10G XGXS) and the external
 * PHY type taken from NVRAM, reads the PHY address from the NIG, and
 * finally masks the supported set down by the NVRAM speed capability mask.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* SerDes PHY addresses are spaced 0x10 apart per port */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* XGXS PHY addresses are spaced 0x18 apart per port */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
9416
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009417static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009418{
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009419 bp->link_params.req_duplex = DUPLEX_FULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009420
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009421 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009422 case PORT_FEATURE_LINK_SPEED_AUTO:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009423 if (bp->port.supported & SUPPORTED_Autoneg) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009424 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009425 bp->port.advertising = bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009426 } else {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009427 u32 ext_phy_type =
9428 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9429
9430 if ((ext_phy_type ==
9431 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9432 (ext_phy_type ==
9433 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009434 /* force 10G, no AN */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009435 bp->link_params.req_line_speed = SPEED_10000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009436 bp->port.advertising =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009437 (ADVERTISED_10000baseT_Full |
9438 ADVERTISED_FIBRE);
9439 break;
9440 }
9441 BNX2X_ERR("NVRAM config error. "
9442 "Invalid link_config 0x%x"
9443 " Autoneg not supported\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009444 bp->port.link_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009445 return;
9446 }
9447 break;
9448
9449 case PORT_FEATURE_LINK_SPEED_10M_FULL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009450 if (bp->port.supported & SUPPORTED_10baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009451 bp->link_params.req_line_speed = SPEED_10;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009452 bp->port.advertising = (ADVERTISED_10baseT_Full |
9453 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009454 } else {
9455 BNX2X_ERR("NVRAM config error. "
9456 "Invalid link_config 0x%x"
9457 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009458 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009459 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009460 return;
9461 }
9462 break;
9463
9464 case PORT_FEATURE_LINK_SPEED_10M_HALF:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009465 if (bp->port.supported & SUPPORTED_10baseT_Half) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009466 bp->link_params.req_line_speed = SPEED_10;
9467 bp->link_params.req_duplex = DUPLEX_HALF;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009468 bp->port.advertising = (ADVERTISED_10baseT_Half |
9469 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009470 } else {
9471 BNX2X_ERR("NVRAM config error. "
9472 "Invalid link_config 0x%x"
9473 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009474 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009475 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009476 return;
9477 }
9478 break;
9479
9480 case PORT_FEATURE_LINK_SPEED_100M_FULL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009481 if (bp->port.supported & SUPPORTED_100baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009482 bp->link_params.req_line_speed = SPEED_100;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009483 bp->port.advertising = (ADVERTISED_100baseT_Full |
9484 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009485 } else {
9486 BNX2X_ERR("NVRAM config error. "
9487 "Invalid link_config 0x%x"
9488 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009489 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009490 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009491 return;
9492 }
9493 break;
9494
9495 case PORT_FEATURE_LINK_SPEED_100M_HALF:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009496 if (bp->port.supported & SUPPORTED_100baseT_Half) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009497 bp->link_params.req_line_speed = SPEED_100;
9498 bp->link_params.req_duplex = DUPLEX_HALF;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009499 bp->port.advertising = (ADVERTISED_100baseT_Half |
9500 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009501 } else {
9502 BNX2X_ERR("NVRAM config error. "
9503 "Invalid link_config 0x%x"
9504 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009505 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009506 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009507 return;
9508 }
9509 break;
9510
9511 case PORT_FEATURE_LINK_SPEED_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009512 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009513 bp->link_params.req_line_speed = SPEED_1000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009514 bp->port.advertising = (ADVERTISED_1000baseT_Full |
9515 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009516 } else {
9517 BNX2X_ERR("NVRAM config error. "
9518 "Invalid link_config 0x%x"
9519 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009520 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009521 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009522 return;
9523 }
9524 break;
9525
9526 case PORT_FEATURE_LINK_SPEED_2_5G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009527 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009528 bp->link_params.req_line_speed = SPEED_2500;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009529 bp->port.advertising = (ADVERTISED_2500baseX_Full |
9530 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009531 } else {
9532 BNX2X_ERR("NVRAM config error. "
9533 "Invalid link_config 0x%x"
9534 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009535 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009536 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009537 return;
9538 }
9539 break;
9540
9541 case PORT_FEATURE_LINK_SPEED_10G_CX4:
9542 case PORT_FEATURE_LINK_SPEED_10G_KX4:
9543 case PORT_FEATURE_LINK_SPEED_10G_KR:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009544 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009545 bp->link_params.req_line_speed = SPEED_10000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009546 bp->port.advertising = (ADVERTISED_10000baseT_Full |
9547 ADVERTISED_FIBRE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009548 } else {
9549 BNX2X_ERR("NVRAM config error. "
9550 "Invalid link_config 0x%x"
9551 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009552 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009553 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009554 return;
9555 }
9556 break;
9557
9558 default:
9559 BNX2X_ERR("NVRAM config error. "
9560 "BAD link speed link_config 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009561 bp->port.link_config);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009562 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009563 bp->port.advertising = bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009564 break;
9565 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009566
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009567 bp->link_params.req_flow_ctrl = (bp->port.link_config &
9568 PORT_FEATURE_FLOW_CONTROL_MASK);
David S. Millerc0700f92008-12-16 23:53:20 -08009569 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
Randy Dunlap4ab84d42008-08-07 20:33:19 -07009570 !(bp->port.supported & SUPPORTED_Autoneg))
David S. Millerc0700f92008-12-16 23:53:20 -08009571 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009572
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009573 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
Eliezer Tamirf1410642008-02-28 11:51:50 -08009574 " advertising 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009575 bp->link_params.req_line_speed,
9576 bp->link_params.req_duplex,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009577 bp->link_params.req_flow_ctrl, bp->port.advertising);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009578}
9579
Michael Chane665bfd2009-10-10 13:46:54 +00009580static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9581{
9582 mac_hi = cpu_to_be16(mac_hi);
9583 mac_lo = cpu_to_be32(mac_lo);
9584 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9585 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9586}
9587
/* Read the per-port hardware configuration from shared memory (NVRAM image)
 * and populate bp->link_params / bp->port accordingly: lane config, external
 * PHY config, speed capabilities, XGXS lane equalization values, WoL default,
 * MDIO PHY address and the port MAC address.
 * NOTE(review): the SHMEM reads and the supported/requested settings calls
 * below are order-dependent — do not reorder.
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current: treat it as a plain
	 * BCM8727 but remember the no-over-current variant in the
	 * feature flags */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx; each SHMEM word packs
	 * two 16-bit lane values (high halfword = even lane) */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	/* port MAC: upper 16 bits in mac_upper, lower 32 in mac_lower */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	/* separate MAC used by the iSCSI offload path */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009679
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009680static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9681{
9682 int func = BP_FUNC(bp);
9683 u32 val, val2;
9684 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009685
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009686 bnx2x_get_common_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009687
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009688 bp->e1hov = 0;
9689 bp->e1hmf = 0;
9690 if (CHIP_IS_E1H(bp)) {
9691 bp->mf_config =
9692 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009693
Eilon Greenstein2691d512009-08-12 08:22:08 +00009694 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
Eilon Greenstein3196a882008-08-13 15:58:49 -07009695 FUNC_MF_CFG_E1HOV_TAG_MASK);
Eilon Greenstein2691d512009-08-12 08:22:08 +00009696 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009697 bp->e1hmf = 1;
Eilon Greenstein2691d512009-08-12 08:22:08 +00009698 BNX2X_DEV_INFO("%s function mode\n",
9699 IS_E1HMF(bp) ? "multi" : "single");
9700
9701 if (IS_E1HMF(bp)) {
9702 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9703 e1hov_tag) &
9704 FUNC_MF_CFG_E1HOV_TAG_MASK);
9705 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9706 bp->e1hov = val;
9707 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9708 "(0x%04x)\n",
9709 func, bp->e1hov, bp->e1hov);
9710 } else {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009711 BNX2X_ERR("!!! No valid E1HOV for func %d,"
9712 " aborting\n", func);
9713 rc = -EPERM;
9714 }
Eilon Greenstein2691d512009-08-12 08:22:08 +00009715 } else {
9716 if (BP_E1HVN(bp)) {
9717 BNX2X_ERR("!!! VN %d in single function mode,"
9718 " aborting\n", BP_E1HVN(bp));
9719 rc = -EPERM;
9720 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009721 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009722 }
9723
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009724 if (!BP_NOMCP(bp)) {
9725 bnx2x_get_port_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009726
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009727 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9728 DRV_MSG_SEQ_NUMBER_MASK);
9729 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9730 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009731
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009732 if (IS_E1HMF(bp)) {
9733 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9734 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
9735 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9736 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9737 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9738 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9739 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9740 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9741 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
9742 bp->dev->dev_addr[5] = (u8)(val & 0xff);
9743 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9744 ETH_ALEN);
9745 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9746 ETH_ALEN);
9747 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009748
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009749 return rc;
9750 }
9751
9752 if (BP_NOMCP(bp)) {
9753 /* only supposed to happen on emulation/FPGA */
Eilon Greenstein33471622008-08-13 15:59:08 -07009754 BNX2X_ERR("warning random MAC workaround active\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009755 random_ether_addr(bp->dev->dev_addr);
9756 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9757 }
9758
9759 return rc;
9760}
9761
/* One-time driver-state initialization at probe: locks, deferred work,
 * hardware info, module-parameter-driven feature flags (multi-queue, TPA,
 * dropless FC), ring sizes, coalescing ticks and the periodic timer.
 * Returns the status of bnx2x_get_hwinfo().
 * NOTE(review): ordering matters — intr_sem must be raised before any work
 * item can run, and hwinfo must be read before the UNDI unload check.
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		pr_err("FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		pr_err("MCP disabled, must load devices in order!\n");

	/* Set multi queue mode: RSS requires MSI-X interrupts */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		pr_err("Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags (LRO follows the TPA module parameter) */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	/* dropless flow control is not supported on E1 */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	/* slower periodic timer on emulation; "poll" module param overrides */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
9839
9840/*
9841 * ethtool service functions
9842 */
9843
9844/* All ethtool functions called with rtnl_lock */
9845
/* ethtool get_settings: report supported/advertised modes, current speed
 * and duplex (capped by the per-VN max bandwidth in multi-function mode),
 * the port type derived from the external PHY, the MDIO address and the
 * autoneg state. Always returns 0.
 */
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	/* report live speed/duplex only when up and not disabled by MF cfg */
	if ((bp->state == BNX2X_STATE_OPEN) &&
	    !(bp->flags & MF_FUNC_DIS) &&
	    (bp->link_vars.link_up)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			/* cap at this VN's configured max bandwidth (Mbps) */
			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < cmd->speed)
				cmd->speed = vn_max_rate;
		}
	} else {
		/* link down: speed/duplex unknown */
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		/* NOTE(review): the two branches below leave cmd->port
		 * unset - presumably intentional, verify against callers */
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
9926
/* ethtool set_settings: validate the requested autoneg/speed/duplex against
 * the port's supported mode mask, update the requested link parameters and
 * (if the interface is running) re-program the link.
 * No-op (returns 0) in multi-function mode where link settings are owned by
 * the management firmware. Returns -EINVAL for unsupported combinations.
 */
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
						SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
						SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		/* 1G and above support full duplex only */
		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	/* apply immediately when the interface is up */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
10077
/* A register-dump table entry is included only when its info flags mark it
 * online for the chip variant being dumped (E1 vs E1H). */
#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10080
10081static int bnx2x_get_regs_len(struct net_device *dev)
10082{
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000010083 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein0d28e492009-08-12 08:23:40 +000010084 int regdump_len = 0;
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000010085 int i;
10086
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000010087 if (CHIP_IS_E1(bp)) {
10088 for (i = 0; i < REGS_COUNT; i++)
10089 if (IS_E1_ONLINE(reg_addrs[i].info))
10090 regdump_len += reg_addrs[i].size;
10091
10092 for (i = 0; i < WREGS_COUNT_E1; i++)
10093 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10094 regdump_len += wreg_addrs_e1[i].size *
10095 (1 + wreg_addrs_e1[i].read_regs_count);
10096
10097 } else { /* E1H */
10098 for (i = 0; i < REGS_COUNT; i++)
10099 if (IS_E1H_ONLINE(reg_addrs[i].info))
10100 regdump_len += reg_addrs[i].size;
10101
10102 for (i = 0; i < WREGS_COUNT_E1H; i++)
10103 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10104 regdump_len += wreg_addrs_e1h[i].size *
10105 (1 + wreg_addrs_e1h[i].read_regs_count);
10106 }
10107 regdump_len *= 4;
10108 regdump_len += sizeof(struct dump_hdr);
10109
10110 return regdump_len;
10111}
10112
/* ethtool_ops::get_regs - dump the chip register space into *_p.
 *
 * Layout: a struct dump_hdr (hdr_size counted in dwords, minus one),
 * followed by raw 32-bit reads of every register that is online for this
 * chip variant.  The buffer was sized by bnx2x_get_regs_len(), so the loops
 * here must visit a subset of what that routine counted; the remainder of
 * the buffer stays zero from the memset below.
 */
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	/* registers may only be read while the device is up */
	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	/* skip past the header (hdr_size is "dwords - 1") */
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
10152
Eilon Greenstein0d28e492009-08-12 08:23:40 +000010153#define PHY_FW_VER_LEN 10
10154
10155static void bnx2x_get_drvinfo(struct net_device *dev,
10156 struct ethtool_drvinfo *info)
10157{
10158 struct bnx2x *bp = netdev_priv(dev);
10159 u8 phy_fw_ver[PHY_FW_VER_LEN];
10160
10161 strcpy(info->driver, DRV_MODULE_NAME);
10162 strcpy(info->version, DRV_MODULE_VERSION);
10163
10164 phy_fw_ver[0] = '\0';
10165 if (bp->port.pmf) {
10166 bnx2x_acquire_phy_lock(bp);
10167 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10168 (bp->state != BNX2X_STATE_CLOSED),
10169 phy_fw_ver, PHY_FW_VER_LEN);
10170 bnx2x_release_phy_lock(bp);
10171 }
10172
10173 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
10174 (bp->common.bc_ver & 0xff0000) >> 16,
10175 (bp->common.bc_ver & 0xff00) >> 8,
10176 (bp->common.bc_ver & 0xff),
10177 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
10178 strcpy(info->bus_info, pci_name(bp->pdev));
10179 info->n_stats = BNX2X_NUM_STATS;
10180 info->testinfo_len = BNX2X_NUM_TESTS;
10181 info->eedump_len = bp->common.flash_size;
10182 info->regdump_len = bnx2x_get_regs_len(dev);
10183}
10184
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010185static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10186{
10187 struct bnx2x *bp = netdev_priv(dev);
10188
10189 if (bp->flags & NO_WOL_FLAG) {
10190 wol->supported = 0;
10191 wol->wolopts = 0;
10192 } else {
10193 wol->supported = WAKE_MAGIC;
10194 if (bp->wol)
10195 wol->wolopts = WAKE_MAGIC;
10196 else
10197 wol->wolopts = 0;
10198 }
10199 memset(&wol->sopass, 0, sizeof(wol->sopass));
10200}
10201
10202static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10203{
10204 struct bnx2x *bp = netdev_priv(dev);
10205
10206 if (wol->wolopts & ~WAKE_MAGIC)
10207 return -EINVAL;
10208
10209 if (wol->wolopts & WAKE_MAGIC) {
10210 if (bp->flags & NO_WOL_FLAG)
10211 return -EINVAL;
10212
10213 bp->wol = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010214 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010215 bp->wol = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010216
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010217 return 0;
10218}
10219
10220static u32 bnx2x_get_msglevel(struct net_device *dev)
10221{
10222 struct bnx2x *bp = netdev_priv(dev);
10223
Joe Perches7995c642010-02-17 15:01:52 +000010224 return bp->msg_enable;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010225}
10226
10227static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10228{
10229 struct bnx2x *bp = netdev_priv(dev);
10230
10231 if (capable(CAP_NET_ADMIN))
Joe Perches7995c642010-02-17 15:01:52 +000010232 bp->msg_enable = level;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010233}
10234
10235static int bnx2x_nway_reset(struct net_device *dev)
10236{
10237 struct bnx2x *bp = netdev_priv(dev);
10238
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010239 if (!bp->port.pmf)
10240 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010241
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010242 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010243 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010244 bnx2x_link_set(bp);
10245 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010246
10247 return 0;
10248}
10249
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000010250static u32 bnx2x_get_link(struct net_device *dev)
Naohiro Ooiwa01e53292009-06-30 12:44:19 -070010251{
10252 struct bnx2x *bp = netdev_priv(dev);
10253
Eilon Greensteinf34d28e2009-10-15 00:18:08 -070010254 if (bp->flags & MF_FUNC_DIS)
10255 return 0;
10256
Naohiro Ooiwa01e53292009-06-30 12:44:19 -070010257 return bp->link_vars.link_up;
10258}
10259
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010260static int bnx2x_get_eeprom_len(struct net_device *dev)
10261{
10262 struct bnx2x *bp = netdev_priv(dev);
10263
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010264 return bp->common.flash_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010265}
10266
/* Claim the per-port software arbitration for the NVRAM interface.
 *
 * Sets this port's request bit in MCP_REG_MCPR_NVM_SW_ARB and polls
 * (5 us per iteration, timeout scaled 100x on emulation/FPGA) until the
 * hardware grants arbitration.  Must be paired with
 * bnx2x_release_nvram_lock().
 *
 * Returns 0 on success, -EBUSY if the grant never arrives.
 */
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
10297
/* Release the per-port software arbitration for the NVRAM interface.
 *
 * Clears this port's request bit in MCP_REG_MCPR_NVM_SW_ARB and polls
 * (same timeout scheme as acquire) until the hardware drops the grant.
 *
 * Returns 0 on success, -EBUSY if the grant is still asserted at timeout.
 */
static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
10328
10329static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10330{
10331 u32 val;
10332
10333 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10334
10335 /* enable both bits, even on read */
10336 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10337 (val | MCPR_NVM_ACCESS_ENABLE_EN |
10338 MCPR_NVM_ACCESS_ENABLE_WR_EN));
10339}
10340
10341static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10342{
10343 u32 val;
10344
10345 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10346
10347 /* disable both bits, even after read */
10348 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10349 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10350 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10351}
10352
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010353static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010354 u32 cmd_flags)
10355{
Eliezer Tamirf1410642008-02-28 11:51:50 -080010356 int count, i, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010357 u32 val;
10358
10359 /* build the command word */
10360 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10361
10362 /* need to clear DONE bit separately */
10363 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10364
10365 /* address of the NVRAM to read from */
10366 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10367 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10368
10369 /* issue a read command */
10370 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10371
10372 /* adjust timeout for emulation/FPGA */
10373 count = NVRAM_TIMEOUT_COUNT;
10374 if (CHIP_REV_IS_SLOW(bp))
10375 count *= 100;
10376
10377 /* wait for completion */
10378 *ret_val = 0;
10379 rc = -EBUSY;
10380 for (i = 0; i < count; i++) {
10381 udelay(5);
10382 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10383
10384 if (val & MCPR_NVM_COMMAND_DONE) {
10385 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010386 /* we read nvram data in cpu order
10387 * but ethtool sees it as an array of bytes
10388 * converting to big-endian will do the work */
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010389 *ret_val = cpu_to_be32(val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010390 rc = 0;
10391 break;
10392 }
10393 }
10394
10395 return rc;
10396}
10397
/* Read an arbitrary (dword-aligned) range from NVRAM into ret_buf.
 *
 * @offset and @buf_size must both be dword-aligned and non-zero, and the
 * range must fit inside the flash.  The first dword is issued with the
 * FIRST command flag and the final one with LAST, as the hardware command
 * protocol requires.  Takes and releases the NVRAM lock internally.
 *
 * Returns 0 on success or a negative errno (-EINVAL on bad parameters,
 * -EBUSY from the lock/dword helpers).
 */
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		/* val is big-endian, so this copies NVRAM bytes in order */
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	/* last dword of the range carries the LAST flag */
	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
10452
10453static int bnx2x_get_eeprom(struct net_device *dev,
10454 struct ethtool_eeprom *eeprom, u8 *eebuf)
10455{
10456 struct bnx2x *bp = netdev_priv(dev);
10457 int rc;
10458
Eilon Greenstein2add3ac2009-01-14 06:44:07 +000010459 if (!netif_running(dev))
10460 return -EAGAIN;
10461
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010462 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010463 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10464 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10465 eeprom->len, eeprom->len);
10466
10467 /* parameters already validated in ethtool_get_eeprom */
10468
10469 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10470
10471 return rc;
10472}
10473
/* Write one aligned dword to NVRAM.
 *
 * @offset:    dword-aligned NVRAM offset
 * @val:       raw dword to write (caller handles any byte-order concerns)
 * @cmd_flags: MCPR_NVM_COMMAND_FIRST/LAST sequencing flags from the caller
 *
 * Caller must already hold the NVRAM lock and have access enabled.
 * Polls the DONE bit (5 us per iteration, 100x timeout on emulation/FPGA).
 *
 * Returns 0 on success, -EBUSY on completion timeout.
 */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		/* 'val' is reused here as a scratch for the status poll */
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
10513
Eliezer Tamirf1410642008-02-28 11:51:50 -080010514#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010515
10516static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10517 int buf_size)
10518{
10519 int rc;
10520 u32 cmd_flags;
10521 u32 align_offset;
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010522 __be32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010523
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010524 if (offset + buf_size > bp->common.flash_size) {
10525 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010526 " buf_size (0x%x) > flash_size (0x%x)\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010527 offset, buf_size, bp->common.flash_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010528 return -EINVAL;
10529 }
10530
10531 /* request access to nvram interface */
10532 rc = bnx2x_acquire_nvram_lock(bp);
10533 if (rc)
10534 return rc;
10535
10536 /* enable access to nvram interface */
10537 bnx2x_enable_nvram_access(bp);
10538
10539 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10540 align_offset = (offset & ~0x03);
10541 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10542
10543 if (rc == 0) {
10544 val &= ~(0xff << BYTE_OFFSET(offset));
10545 val |= (*data_buf << BYTE_OFFSET(offset));
10546
10547 /* nvram data is returned as an array of bytes
10548 * convert it back to cpu order */
10549 val = be32_to_cpu(val);
10550
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010551 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10552 cmd_flags);
10553 }
10554
10555 /* disable access to nvram interface */
10556 bnx2x_disable_nvram_access(bp);
10557 bnx2x_release_nvram_lock(bp);
10558
10559 return rc;
10560}
10561
/* Write an arbitrary range to NVRAM.
 *
 * A 1-byte request (the ethtool single-byte case) is delegated to
 * bnx2x_nvram_write1(); otherwise @offset and @buf_size must be
 * dword-aligned, non-zero, and the range must fit inside the flash.
 *
 * The command-flag logic re-arms FIRST at each NVRAM page boundary and
 * raises LAST on the final dword of either the buffer or the page, as the
 * hardware write protocol requires.  Takes and releases the NVRAM lock
 * internally.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* LAST on the final dword of the buffer or of the page,
		 * FIRST again on the first dword of a new page */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
10622
/* ethtool_ops::set_eeprom - write NVRAM, or drive the external-PHY
 * firmware-upgrade sequence when one of the magic cookies is passed.
 *
 * Magic cookies:
 *   0x50485950 'PHYP' - prepare PHY for firmware upgrade (link down,
 *                       raise GPIO0 for the SFX7101 PHY)
 *   0x50485952 'PHYR' - re-initialize the link after the upgrade
 *   0x53985943        - PHY firmware upgrade completed (SFX7101 reset
 *                       sequence with 0.5 s settle delays)
 * Any other magic falls through to a plain NVRAM write.
 *
 * NOTE(review): 0x53985943 does not spell ASCII "PHYC" (which would be
 * 0x50485943) and therefore also falls outside the 0x504859xx range of
 * the PMF-only check below — confirm this is intentional.
 *
 * Returns 0 on success or a negative errno; the PHY branches OR together
 * the return codes of the individual link operations.
 */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
10697
10698static int bnx2x_get_coalesce(struct net_device *dev,
10699 struct ethtool_coalesce *coal)
10700{
10701 struct bnx2x *bp = netdev_priv(dev);
10702
10703 memset(coal, 0, sizeof(struct ethtool_coalesce));
10704
10705 coal->rx_coalesce_usecs = bp->rx_ticks;
10706 coal->tx_coalesce_usecs = bp->tx_ticks;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010707
10708 return 0;
10709}
10710
Eilon Greensteinca003922009-08-12 22:53:28 -070010711#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010712static int bnx2x_set_coalesce(struct net_device *dev,
10713 struct ethtool_coalesce *coal)
10714{
10715 struct bnx2x *bp = netdev_priv(dev);
10716
10717 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -070010718 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
10719 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010720
10721 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -070010722 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
10723 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010724
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010725 if (netif_running(dev))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010726 bnx2x_update_coalesce(bp);
10727
10728 return 0;
10729}
10730
10731static void bnx2x_get_ringparam(struct net_device *dev,
10732 struct ethtool_ringparam *ering)
10733{
10734 struct bnx2x *bp = netdev_priv(dev);
10735
10736 ering->rx_max_pending = MAX_RX_AVAIL;
10737 ering->rx_mini_max_pending = 0;
10738 ering->rx_jumbo_max_pending = 0;
10739
10740 ering->rx_pending = bp->rx_ring_size;
10741 ering->rx_mini_pending = 0;
10742 ering->rx_jumbo_pending = 0;
10743
10744 ering->tx_max_pending = MAX_TX_AVAIL;
10745 ering->tx_pending = bp->tx_ring_size;
10746}
10747
10748static int bnx2x_set_ringparam(struct net_device *dev,
10749 struct ethtool_ringparam *ering)
10750{
10751 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010752 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010753
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +000010754 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10755 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10756 return -EAGAIN;
10757 }
10758
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010759 if ((ering->rx_pending > MAX_RX_AVAIL) ||
10760 (ering->tx_pending > MAX_TX_AVAIL) ||
10761 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10762 return -EINVAL;
10763
10764 bp->rx_ring_size = ering->rx_pending;
10765 bp->tx_ring_size = ering->tx_pending;
10766
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010767 if (netif_running(dev)) {
10768 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10769 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010770 }
10771
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010772 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010773}
10774
/* ethtool_ops::get_pauseparam - report flow-control configuration.
 *
 * "autoneg" is reported only when both the requested flow control and the
 * requested line speed are set to auto-negotiate; rx/tx pause reflect the
 * currently resolved link_vars flow-control bits.
 */
static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}
10793
/* ethtool_ops::set_pauseparam - configure flow control.
 *
 * Ignored (returns success) in E1H multi-function mode, where the PF does
 * not own the link.  req_flow_ctrl is rebuilt from scratch: start from
 * AUTO (== both RX|TX bits), OR in the requested directions, collapse an
 * unchanged AUTO to NONE, and finally restore AUTO only when autoneg is
 * requested, supported, and the line speed is auto-negotiated.  The new
 * setting is pushed to the hardware only if the interface is running.
 *
 * Returns 0 on success, -EINVAL if autoneg is requested but unsupported.
 */
static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	/* nothing was ORed in -> no pause requested */
	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
10837
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -070010838static int bnx2x_set_flags(struct net_device *dev, u32 data)
10839{
10840 struct bnx2x *bp = netdev_priv(dev);
10841 int changed = 0;
10842 int rc = 0;
10843
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +000010844 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10845 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10846 return -EAGAIN;
10847 }
10848
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -070010849 /* TPA requires Rx CSUM offloading */
10850 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
Vladislav Zolotarovd43a7e62010-02-17 02:03:40 +000010851 if (!disable_tpa) {
10852 if (!(dev->features & NETIF_F_LRO)) {
10853 dev->features |= NETIF_F_LRO;
10854 bp->flags |= TPA_ENABLE_FLAG;
10855 changed = 1;
10856 }
10857 } else
10858 rc = -EINVAL;
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -070010859 } else if (dev->features & NETIF_F_LRO) {
10860 dev->features &= ~NETIF_F_LRO;
10861 bp->flags &= ~TPA_ENABLE_FLAG;
10862 changed = 1;
10863 }
10864
10865 if (changed && netif_running(dev)) {
10866 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10867 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10868 }
10869
10870 return rc;
10871}
10872
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010873static u32 bnx2x_get_rx_csum(struct net_device *dev)
10874{
10875 struct bnx2x *bp = netdev_priv(dev);
10876
10877 return bp->rx_csum;
10878}
10879
/* ethtool_ops::set_rx_csum - enable/disable Rx checksum offload.
 *
 * Refused while parity-error recovery is in progress.  Disabling Rx
 * checksum also forces LRO/TPA off via bnx2x_set_flags(), because TPA
 * depends on hardware checksum validation.
 *
 * Returns 0 on success, -EAGAIN during recovery, or the result of the
 * implied bnx2x_set_flags() call.
 */
static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}
10902
10903static int bnx2x_set_tso(struct net_device *dev, u32 data)
10904{
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010905 if (data) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010906 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010907 dev->features |= NETIF_F_TSO6;
10908 } else {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010909 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070010910 dev->features &= ~NETIF_F_TSO6;
10911 }
10912
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010913 return 0;
10914}
10915
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010916static const struct {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010917 char string[ETH_GSTRING_LEN];
10918} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010919 { "register_test (offline)" },
10920 { "memory_test (offline)" },
10921 { "loopback_test (offline)" },
10922 { "nvram_test (online)" },
10923 { "interrupt_test (online)" },
10924 { "link_test (online)" },
Eilon Greensteind3d4f492009-02-12 08:36:27 +000010925 { "idle check (online)" }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010926};
10927
/* Offline self test: write/read-back verification of a table of per-port
 * registers.  Each register is written with 0x00000000 and then 0xffffffff,
 * read back, compared under its read/write mask, and restored to its
 * original value.  Returns 0 on success, -ENODEV on the first mismatch or
 * if the interface is down.
 */
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	/* offset0: register address for port 0;
	 * offset1: per-port stride added as port*offset1;
	 * mask:    bits that are expected to be read/write */
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		/* sentinel: offset0 == 0xffffffff terminates the table */
		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			/* save the current value before clobbering it */
			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
11020
/* Offline self test: read every word of a set of internal chip memories
 * (which updates the parity status registers as a side effect of the
 * reads), then check each block's parity status register against the
 * chip-revision-specific mask of benign bits.  Returns 0 on success,
 * -ENODEV on a parity indication or if the interface is down.
 */
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	/* internal memories to sweep; terminated by offset == 0xffffffff */
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	/* parity status registers with per-chip (E1 vs E1H) masks of bits
	 * that may legitimately be set; terminated by offset == 0xffffffff */
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		/* any bit outside the per-chip benign mask means failure */
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
11079
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011080static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11081{
11082 int cnt = 1000;
11083
11084 if (link_up)
11085 while (bnx2x_link_test(bp) && cnt--)
11086 msleep(10);
11087}
11088
/* Run a single-packet loopback test in the given mode (PHY or MAC).
 * Builds a test frame, posts it on queue 0's TX ring, rings the doorbell,
 * then verifies that exactly one packet completed on TX, arrived on RX,
 * passed without error flags, and came back with the expected length and
 * payload.  Returns 0 on success, -EINVAL for a bad/unsupported mode,
 * -ENOMEM on skb allocation failure, -ENODEV on any data-path mismatch.
 *
 * NOTE(review): caller is expected to have stopped normal traffic and to
 * hold the PHY lock (bnx2x_test_loopback() does both) — confirm before
 * reusing from another path.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	/* both TX and RX sides of the test use fastpath queue 0 */
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		/* PHY loopback relies on the mode already being set up */
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		/* switch the link into BMAC loopback and re-init the PHY */
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet: MTU-sized (capped), dest MAC is our
	 * own address, src MAC zeroed, rest of the header 0x77-filled, and
	 * a payload of bytes (i & 0xff) that the RX side re-checks below */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	/* snapshot consumer indices so completion counts can be compared */
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	/* fill the start BD: DMA address, length, flags */
	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	/* make sure the BDs are in memory before ringing the doorbell */
	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	/* give the chip time to loop the packet back */
	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* inspect the completion: must be a fast-path CQE with no errors */
	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* verify the payload pattern written above survived the round trip */
	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* consume the RX descriptor/completion used by the test packet */
	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
11223
11224static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11225{
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000011226 int rc = 0, res;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011227
11228 if (!netif_running(bp->dev))
11229 return BNX2X_LOOPBACK_FAILED;
11230
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070011231 bnx2x_netif_stop(bp, 1);
Eilon Greenstein3910c8a2009-01-22 06:01:32 +000011232 bnx2x_acquire_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011233
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000011234 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11235 if (res) {
11236 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
11237 rc |= BNX2X_PHY_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011238 }
11239
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000011240 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11241 if (res) {
11242 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
11243 rc |= BNX2X_MAC_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011244 }
11245
Eilon Greenstein3910c8a2009-01-22 06:01:32 +000011246 bnx2x_release_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011247 bnx2x_netif_start(bp);
11248
11249 return rc;
11250}
11251
/* Standard CRC-32 residue: the CRC of a data block with its own (inverted)
 * CRC appended; bnx2x_test_nvram() compares each region's CRC to this. */
#define CRC32_RESIDUAL			0xdebb20e3
11253
11254static int bnx2x_test_nvram(struct bnx2x *bp)
11255{
11256 static const struct {
11257 int offset;
11258 int size;
11259 } nvram_tbl[] = {
11260 { 0, 0x14 }, /* bootstrap */
11261 { 0x14, 0xec }, /* dir */
11262 { 0x100, 0x350 }, /* manuf_info */
11263 { 0x450, 0xf0 }, /* feature_info */
11264 { 0x640, 0x64 }, /* upgrade_key_info */
11265 { 0x6a4, 0x64 },
11266 { 0x708, 0x70 }, /* manuf_key_info */
11267 { 0x778, 0x70 },
11268 { 0, 0 }
11269 };
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000011270 __be32 buf[0x350 / 4];
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011271 u8 *data = (u8 *)buf;
11272 int i, rc;
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000011273 u32 magic, crc;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011274
11275 rc = bnx2x_nvram_read(bp, 0, data, 4);
11276 if (rc) {
Eilon Greensteinf5372252009-02-12 08:38:30 +000011277 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011278 goto test_nvram_exit;
11279 }
11280
11281 magic = be32_to_cpu(buf[0]);
11282 if (magic != 0x669955aa) {
11283 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11284 rc = -ENODEV;
11285 goto test_nvram_exit;
11286 }
11287
11288 for (i = 0; nvram_tbl[i].size; i++) {
11289
11290 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11291 nvram_tbl[i].size);
11292 if (rc) {
11293 DP(NETIF_MSG_PROBE,
Eilon Greensteinf5372252009-02-12 08:38:30 +000011294 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011295 goto test_nvram_exit;
11296 }
11297
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000011298 crc = ether_crc_le(nvram_tbl[i].size, data);
11299 if (crc != CRC32_RESIDUAL) {
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011300 DP(NETIF_MSG_PROBE,
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000011301 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070011302 rc = -ENODEV;
11303 goto test_nvram_exit;
11304 }
11305 }
11306
11307test_nvram_exit:
11308 return rc;
11309}
11310
/* Interrupt self test: post an empty SET_MAC ramrod to the device and wait
 * (up to ~100 ms, polling every 10 ms) for its completion to clear
 * set_mac_pending, proving the slow-path interrupt/completion machinery
 * works.  Returns 0 on success, -ENODEV if the interface is down, the
 * posting fails implicitly via rc, or the completion never arrives.
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* length 0: no actual MAC entries are being configured */
	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* mark a completion as pending before posting; paired with the
	 * clearing done by the ramrod completion handler */
	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		/* poll for the completion interrupt to clear the flag */
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
11346
/* ethtool self_test callback: fills buf[0..6] (order matches
 * bnx2x_tests_str_arr) and sets ETH_TEST_FL_FAILED on any failure.
 * Offline tests (registers, memory, loopback) require reloading the NIC
 * in diagnostic mode and are skipped in E1H multi-function mode; online
 * tests (nvram, interrupt, link) run regardless.  Refuses to run while
 * parity-error recovery is in progress.
 */
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		/* remember link state so it can be re-awaited after reload */
		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* buf[2] carries the loopback failure bitmask directly */
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* return to normal operating mode */
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* link test only makes sense on the port-management function */
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
11422
/* Per-queue ethtool statistics: offset (into a queue's eth_q_stats, via
 * Q_STATS_OFFSET32), counter width in bytes (4 or 8), and the ethtool
 * string, where "[%d]" is filled with the queue index by
 * bnx2x_get_strings().
 */
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};
11448
/* Global ethtool statistics: offset into bp->eth_stats (STATS_OFFSET32),
 * counter width in bytes (4 or 8), scope flags (per-port, per-function,
 * or both — used to filter port stats in E1H multi-function mode), and
 * the ethtool string.
 */
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
11542
/* Classify entry i of bnx2x_stats_arr[] by scope: port-only vs function-
 * visible.  IS_E1HMF_MODE_STAT() is true when running E1H multi-function
 * without the BNX2X_MSG_STATS debug override, in which case port stats
 * are hidden from ethtool. */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
Yitchak Gertner66e855f2008-08-13 15:49:05 -070011548
Ben Hutchings15f0a392009-10-01 11:58:24 +000011549static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11550{
11551 struct bnx2x *bp = netdev_priv(dev);
11552 int i, num_stats;
11553
11554 switch(stringset) {
11555 case ETH_SS_STATS:
11556 if (is_multi(bp)) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011557 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
Ben Hutchings15f0a392009-10-01 11:58:24 +000011558 if (!IS_E1HMF_MODE_STAT(bp))
11559 num_stats += BNX2X_NUM_STATS;
11560 } else {
11561 if (IS_E1HMF_MODE_STAT(bp)) {
11562 num_stats = 0;
11563 for (i = 0; i < BNX2X_NUM_STATS; i++)
11564 if (IS_FUNC_STAT(i))
11565 num_stats++;
11566 } else
11567 num_stats = BNX2X_NUM_STATS;
11568 }
11569 return num_stats;
11570
11571 case ETH_SS_TEST:
11572 return BNX2X_NUM_TESTS;
11573
11574 default:
11575 return -EINVAL;
11576 }
11577}
11578
/* ethtool get_strings callback: fills buf with ETH_GSTRING_LEN-sized
 * entries matching the counts reported by bnx2x_get_sset_count().  For
 * stats in multi-queue mode, per-queue strings come first (queue index
 * substituted into the "[%d]" of bnx2x_q_stats_arr) followed by the
 * global strings; E1H MF mode filters out port-scoped stats.
 */
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				/* the table string is the format; i fills
				 * its "[%d]" queue-index placeholder */
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			/* j tracks the output slot; port stats may be
			 * skipped so it can lag behind i */
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}
11615
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011616static void bnx2x_get_ethtool_stats(struct net_device *dev,
11617 struct ethtool_stats *stats, u64 *buf)
11618{
11619 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +000011620 u32 *hw_stats, *offset;
11621 int i, j, k;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011622
Eilon Greensteinde832a52009-02-12 08:36:33 +000011623 if (is_multi(bp)) {
11624 k = 0;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011625 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +000011626 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11627 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11628 if (bnx2x_q_stats_arr[j].size == 0) {
11629 /* skip this counter */
11630 buf[k + j] = 0;
11631 continue;
11632 }
11633 offset = (hw_stats +
11634 bnx2x_q_stats_arr[j].offset);
11635 if (bnx2x_q_stats_arr[j].size == 4) {
11636 /* 4-byte counter */
11637 buf[k + j] = (u64) *offset;
11638 continue;
11639 }
11640 /* 8-byte counter */
11641 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11642 }
11643 k += BNX2X_NUM_Q_STATS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011644 }
Eilon Greensteinde832a52009-02-12 08:36:33 +000011645 if (IS_E1HMF_MODE_STAT(bp))
11646 return;
11647 hw_stats = (u32 *)&bp->eth_stats;
11648 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11649 if (bnx2x_stats_arr[j].size == 0) {
11650 /* skip this counter */
11651 buf[k + j] = 0;
11652 continue;
11653 }
11654 offset = (hw_stats + bnx2x_stats_arr[j].offset);
11655 if (bnx2x_stats_arr[j].size == 4) {
11656 /* 4-byte counter */
11657 buf[k + j] = (u64) *offset;
11658 continue;
11659 }
11660 /* 8-byte counter */
11661 buf[k + j] = HILO_U64(*offset, *(offset + 1));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011662 }
Eilon Greensteinde832a52009-02-12 08:36:33 +000011663 } else {
11664 hw_stats = (u32 *)&bp->eth_stats;
11665 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11666 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11667 continue;
11668 if (bnx2x_stats_arr[i].size == 0) {
11669 /* skip this counter */
11670 buf[j] = 0;
11671 j++;
11672 continue;
11673 }
11674 offset = (hw_stats + bnx2x_stats_arr[i].offset);
11675 if (bnx2x_stats_arr[i].size == 4) {
11676 /* 4-byte counter */
11677 buf[j] = (u64) *offset;
11678 j++;
11679 continue;
11680 }
11681 /* 8-byte counter */
11682 buf[j] = HILO_U64(*offset, *(offset + 1));
11683 j++;
11684 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011685 }
11686}
11687
11688static int bnx2x_phys_id(struct net_device *dev, u32 data)
11689{
11690 struct bnx2x *bp = netdev_priv(dev);
11691 int i;
11692
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011693 if (!netif_running(dev))
11694 return 0;
11695
11696 if (!bp->port.pmf)
11697 return 0;
11698
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011699 if (data == 0)
11700 data = 2;
11701
11702 for (i = 0; i < (data * 2); i++) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011703 if ((i % 2) == 0)
Yaniv Rosner7846e472009-11-05 19:18:07 +020011704 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11705 SPEED_1000);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011706 else
Yaniv Rosner7846e472009-11-05 19:18:07 +020011707 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011708
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011709 msleep_interruptible(500);
11710 if (signal_pending(current))
11711 break;
11712 }
11713
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011714 if (bp->link_vars.link_up)
Yaniv Rosner7846e472009-11-05 19:18:07 +020011715 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11716 bp->link_vars.line_speed);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011717
11718 return 0;
11719}
11720
/* ethtool entry points for the bnx2x driver; wired up to the net_device
 * in the probe path.  Generic ethtool_op_* helpers are used where the
 * stack's default behavior is sufficient.
 */
static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */
11758
11759/* end of ethtool_ops */
11760
11761/****************************************************************************
11762* General service functions
11763****************************************************************************/
11764
/* Move the device between PCI power states by programming the PMCSR
 * (PCI_PM_CTRL) register directly.  Only D0 and D3hot are supported;
 * any other state returns -EINVAL.
 */
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		/* clear the PowerState field (-> D0) and write 1 to the
		 * PME_STATUS bit to clear any pending wake event */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* 3 is the D3hot encoding of the PMCSR PowerState field */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		/* arm PME generation when Wake-on-LAN is configured */
		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
11802
Eilon Greenstein237907c2009-01-14 06:42:44 +000011803static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11804{
11805 u16 rx_cons_sb;
11806
11807 /* Tell compiler that status block fields can change */
11808 barrier();
11809 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
11810 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
11811 rx_cons_sb++;
11812 return (fp->rx_comp_cons != rx_cons_sb);
11813}
11814
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011815/*
11816 * net_device service functions
11817 */
11818
/* NAPI poll callback for one fastpath: service TX completions and RX work
 * until either the budget is exhausted (stay scheduled) or no work remains
 * (complete NAPI and re-enable the queue's interrupts via IGU acks).
 * Returns the number of RX packets processed.
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		/* debug builds: bail out immediately once the driver
		 * has panicked */
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		/* TX completions are not budgeted */
		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, thus we need
			 * to ensure that status block indices have been actually read
			 * (bnx2x_update_fpsb_idx) prior to this check
			 * (bnx2x_has_rx_work) so that we won't write the "newer"
			 * value of the status block to IGU (if there was a DMA right
			 * after bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed to right
			 * before bnx2x_ack_sb). In this case there will never be
			 * another interrupt until there is another update of the
			 * status block, while there is still unhandled work.
			 */
			rmb();

			/* re-check after the barrier before giving up the poll */
			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
11877
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011878
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 *
 * On entry *tx_bd points at the start BD covering the whole linear part;
 * it is shrunk to @hlen bytes and a new data BD is carved out for the
 * remainder, sharing the original DMA mapping (offset by hlen).  Returns
 * the producer index of the new data BD and updates *tx_bd to point at it.
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD: it now carries only the headers */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	/* reuse the header BD's mapping, advanced past the headers */
	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
11928
/* Adjust a partial checksum whose coverage start differs from the
 * transport header by @fix bytes (see SKB_CS_OFF at the caller):
 * fix > 0 subtracts the checksum of the @fix bytes preceding @t_header,
 * fix < 0 adds the checksum of the -@fix bytes starting at @t_header.
 * The result is folded, complemented and byte-swapped for the HW
 * (the caller uses it as a "HW bug" fixup for the pseudo checksum).
 */
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
11941
11942static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11943{
11944 u32 rc;
11945
11946 if (skb->ip_summed != CHECKSUM_PARTIAL)
11947 rc = XMIT_PLAIN;
11948
11949 else {
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000011950 if (skb->protocol == htons(ETH_P_IPV6)) {
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011951 rc = XMIT_CSUM_V6;
11952 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11953 rc |= XMIT_CSUM_TCP;
11954
11955 } else {
11956 rc = XMIT_CSUM_V4;
11957 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11958 rc |= XMIT_CSUM_TCP;
11959 }
11960 }
11961
11962 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
Eilon Greensteind6a2f982009-11-09 06:09:22 +000011963 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011964
11965 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
Eilon Greensteind6a2f982009-11-09 06:09:22 +000011966 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
Eilon Greenstein755735eb2008-06-23 20:35:13 -070011967
11968 return rc;
11969}
11970
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions)

   For LSO packets this slides a window of (MAX_FETCH_BD - 3) consecutive
   data BDs over the fragment list and reports 1 when any window carries
   less than one MSS of payload; non-LSO packets with too many frags are
   always linearized.  Returns 1 when skb_linearize() is required. */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				/* slide window one frag forward: add the new
				 * trailing frag, later drop the leading one */
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
Eilon Greenstein755735eb2008-06-23 20:35:13 -070012051
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 *
 * Main TX entry point: builds the BD chain for @skb on the fastpath
 * selected by the skb's queue mapping (start BD, parsing BD, optional
 * TSO header/data split, one BD per fragment), DMA-maps the buffers and
 * rings the doorbell.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY when the
 * ring is full.
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	/* worst case: one BD per frag + start BD + parsing BD + last BD */
	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		/* header lengths in the parsing BD are in 16-bit words */
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		/* back to bytes for the TSO split below */
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		/* linear part longer than the headers: split it into a
		 * header BD and a data BD sharing one mapping */
		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* one data BD per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
12333
/* called with rtnl_lock */
/* net_device_ops .ndo_open: power the chip up and load the NIC.
 * If a previous parity-error "process kill" recovery has not completed,
 * either finish it here (if we can become the recovery leader) or refuse
 * to open with -EAGAIN so the user retries later.
 */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* carrier stays off until the link is actually up */
	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		/* do { } while (0) is used only so "break" can skip to the
		 * recovery-done path below on success */
		do {
			/* Reset MCP mail box sequence if there is on going
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared it may mean that. We don't
			 * check the attention state here because it may have
			 * already been cleared by a "common" reset but we
			 * shell proceed with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			/* could not recover: power back down and bail out */
			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If u still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
12379
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070012380/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012381static int bnx2x_close(struct net_device *dev)
12382{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012383 struct bnx2x *bp = netdev_priv(dev);
12384
12385 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070012386 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12387 if (atomic_read(&bp->pdev->enable_cnt) == 1)
12388 if (!CHIP_REV_IS_SLOW(bp))
12389 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012390
12391 return 0;
12392}
12393
/* called with netif_tx_lock from dev_mcast.c */
/* net_device_ops .ndo_set_multicast_list: translate dev->flags and the
 * multicast list into the chip's RX filtering mode.
 *
 * Promisc > allmulti > exact-match:
 *  - E1 programs up to BNX2X_MAX_MULTICAST exact MAC CAM entries via a
 *    slow-path ramrod (falls back to allmulti when the list is longer);
 *  - E1H uses a CRC32c-based hash filter written to MC_HASH registers.
 */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* filtering can only be (re)programmed while the NIC is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct netdev_hw_addr *ha;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			/* fill one CAM entry per multicast address; MAC
			 * bytes are stored as three byte-swapped u16s */
			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&ha->addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&ha->addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&ha->addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			/* invalidate entries left over from a previous,
			 * longer list (stop at the first already-invalid
			 * entry) */
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			/* make the table visible before posting the ramrod */
			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			/* set one bit per address, selected by the top
			 * byte of the CRC32c of the MAC */
			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   ha->addr);

				crc = crc32c_le(0, ha->addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
12514
12515/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012516static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12517{
12518 struct sockaddr *addr = p;
12519 struct bnx2x *bp = netdev_priv(dev);
12520
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012521 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012522 return -EINVAL;
12523
12524 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012525 if (netif_running(dev)) {
12526 if (CHIP_IS_E1(bp))
Michael Chane665bfd2009-10-10 13:46:54 +000012527 bnx2x_set_eth_mac_addr_e1(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012528 else
Michael Chane665bfd2009-10-10 13:46:54 +000012529 bnx2x_set_eth_mac_addr_e1h(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012530 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012531
12532 return 0;
12533}
12534
/* called with rtnl_lock */
/* mdio_if_info .mdio_read callback: read one register from the external
 * PHY over clause-45 MDIO.
 *
 * @prtad: port address from the request; must match bp->mdio.prtad
 * @devad: MMD device address; MDIO_DEVAD_NONE (CL22 emulation) is mapped
 *         to DEFAULT_PHY_DEV_ADDR as the HW expects
 * @addr:  register address
 *
 * Returns the 16-bit register value on success, negative errno on failure.
 */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	/* PHY access is serialized across functions via the phy lock */
	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	/* on success return the value itself (rc == 0 means OK) */
	if (!rc)
		rc = value;
	return rc;
}
12566
/* called with rtnl_lock */
/* mdio_if_info .mdio_write callback: write one register of the external
 * PHY over clause-45 MDIO.  Mirrors bnx2x_mdio_read(): validates the
 * port address, maps CL22 devad, and serializes via the phy lock.
 * Returns 0 on success, negative errno on failure.
 */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}
12593
/* called with rtnl_lock */
/* net_device_ops .ndo_do_ioctl: forward MII ioctls (SIOCGMIIREG etc.)
 * to the generic mdio layer, which in turn calls our mdio_read/write
 * callbacks.  Rejected with -EAGAIN while the interface is down.
 */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
12608
/* called with rtnl_lock */
/* net_device_ops .ndo_change_mtu: validate the new MTU and, if the
 * interface is up, bounce the NIC (unload + reload) so the RX buffer
 * sizes are recomputed.  Refused with -EAGAIN during parity-error
 * recovery.
 */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
12637
/* net_device_ops .ndo_tx_timeout: the stack detected a stuck TX queue.
 * Defer the actual reset to the driver's reset_task workqueue item; in
 * debug builds (BNX2X_STOP_ON_ERROR) panic the driver state first so it
 * can be inspected.
 */
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}
12649
#ifdef BCM_VLAN
/* called with rtnl_lock */
/* net_device_ops .ndo_vlan_rx_register: remember the VLAN group and
 * mirror the device's HW-VLAN feature bits into bp->flags; if the NIC
 * is up, re-send the client configuration so the FW picks up the new
 * VLAN stripping/insertion settings.
 */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif
12673
#ifdef CONFIG_NET_POLL_CONTROLLER
/* net_device_ops .ndo_poll_controller: service the device with IRQs
 * disabled (used by netconsole/netpoll) by invoking the interrupt
 * handler directly.
 */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
12684
/* netdev callback table wired into dev->netdev_ops in bnx2x_init_dev() */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
12702
/* Probe-time PCI/netdev initialization:
 * enable the PCI device, claim and map BAR0 (registers) and BAR2
 * (doorbells), configure DMA masks, clean chip indirect-address
 * registers, and populate the net_device ops/feature bits and the
 * mdio_if_info callbacks.  On failure every acquired resource is
 * released via the goto-cleanup ladder at the bottom.
 * Returns 0 on success, negative errno otherwise.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		pr_err("Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 (registers) and BAR2 (doorbells) must both be MMIO */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		pr_err("Cannot find second PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* only the first function to enable the device claims regions
	 * and sets bus mastering */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			pr_err("Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		pr_err("Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		pr_err("Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* prefer 64-bit DMA (sets USING_DAC_FLAG), fall back to 32-bit */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			pr_err("dma_set_coherent_mask failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		pr_err("System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		pr_err("Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* map at most BNX2X_DB_SIZE of the doorbell BAR */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		pr_err("Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	/* same offload capabilities for VLAN-tagged traffic */
	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
12862
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012863static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
12864 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -080012865{
12866 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
12867
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012868 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
12869
12870 /* return value of 1=2.5GHz 2=5GHz */
12871 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -080012872}
12873
/* Sanity-check the firmware blob loaded into bp->firmware before any of
 * its sections are parsed:
 *  - every section's (offset, len) must lie within the file,
 *  - every init_ops offset must index into the init_ops array,
 *  - the embedded FW version must match what this driver was built for.
 * Returns 0 if the image is acceptable, -EINVAL otherwise.
 */
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	/* the header is itself an array of section descriptors */
	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			pr_err("Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	/* NOTE(review): the comparison is '> num_ops', which still lets an
	 * offset equal to num_ops through even though valid indices are
	 * 0..num_ops-1 — confirm whether num_ops is intentionally allowed
	 * as an end-marker value */
	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			pr_err("Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
12931
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012932static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012933{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012934 const __be32 *source = (const __be32 *)_source;
12935 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012936 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012937
12938 for (i = 0; i < n/4; i++)
12939 target[i] = be32_to_cpu(source[i]);
12940}
12941
12942/*
12943 Ops array is stored in the following format:
12944 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12945 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012946static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012947{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012948 const __be32 *source = (const __be32 *)_source;
12949 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012950 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012951
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012952 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012953 tmp = be32_to_cpu(source[j]);
12954 target[i].op = (tmp >> 24) & 0xff;
12955 target[i].offset = tmp & 0xffffff;
12956 target[i].raw_data = be32_to_cpu(source[j+1]);
12957 }
12958}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012959
12960static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012961{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012962 const __be16 *source = (const __be16 *)_source;
12963 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012964 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012965
12966 for (i = 0; i < n/2; i++)
12967 target[i] = be16_to_cpu(source[i]);
12968}
12969
/* Allocate bp->arr sized from the firmware header and fill it by running
 * 'func' (an endian-conversion helper) over the corresponding firmware
 * section.  On allocation failure jumps to the cleanup label 'lbl' in the
 * enclosing function — only usable inside bnx2x_init_firmware(), which
 * has 'bp' and 'fw_hdr' in scope.
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012981
/* Request the chip-appropriate firmware image (E1 vs E1H), validate it
 * with bnx2x_check_firmware(), then unpack its sections:
 *  - init_data / init_ops / init_ops_offsets are copied into freshly
 *    allocated, endian-converted arrays (BNX2X_ALLOC_AND_SET),
 *  - the STORM int-table and PRAM pointers are set to reference the
 *    firmware blob in place.
 * On any failure all partial allocations and the firmware reference are
 * released via the goto-cleanup ladder.  Returns 0 or negative errno.
 */
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else
		fw_file_name = FW_FILE_NAME_E1H;

	pr_info("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		pr_err("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		pr_err("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
13049
13050
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013051static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13052 const struct pci_device_id *ent)
13053{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013054 struct net_device *dev = NULL;
13055 struct bnx2x *bp;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000013056 int pcie_width, pcie_speed;
Eliezer Tamir25047952008-02-28 11:50:16 -080013057 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013058
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013059 /* dev zeroed in init_etherdev */
Eilon Greenstein555f6c72009-02-12 08:36:11 +000013060 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013061 if (!dev) {
Joe Perches7995c642010-02-17 15:01:52 +000013062 pr_err("Cannot allocate net device\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013063 return -ENOMEM;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013064 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013065
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013066 bp = netdev_priv(dev);
Joe Perches7995c642010-02-17 15:01:52 +000013067 bp->msg_enable = debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013068
Eilon Greensteindf4770de2009-08-12 08:23:28 +000013069 pci_set_drvdata(pdev, dev);
13070
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013071 rc = bnx2x_init_dev(pdev, dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013072 if (rc < 0) {
13073 free_netdev(dev);
13074 return rc;
13075 }
13076
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013077 rc = bnx2x_init_bp(bp);
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000013078 if (rc)
13079 goto init_one_exit;
13080
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013081 /* Set init arrays */
13082 rc = bnx2x_init_firmware(bp, &pdev->dev);
13083 if (rc) {
Joe Perches7995c642010-02-17 15:01:52 +000013084 pr_err("Error loading firmware\n");
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013085 goto init_one_exit;
13086 }
13087
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000013088 rc = register_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013089 if (rc) {
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000013090 dev_err(&pdev->dev, "Cannot register net device\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013091 goto init_one_exit;
13092 }
13093
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000013094 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
Joe Perches7995c642010-02-17 15:01:52 +000013095 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
13096 board_info[ent->driver_data].name,
13097 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13098 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13099 dev->base_addr, bp->pdev->irq, dev->dev_addr);
Eilon Greensteinc0162012009-03-02 08:01:05 +000013100
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013101 return 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013102
13103init_one_exit:
13104 if (bp->regview)
13105 iounmap(bp->regview);
13106
13107 if (bp->doorbells)
13108 iounmap(bp->doorbells);
13109
13110 free_netdev(dev);
13111
13112 if (atomic_read(&pdev->enable_cnt) == 1)
13113 pci_release_regions(pdev);
13114
13115 pci_disable_device(pdev);
13116 pci_set_drvdata(pdev, NULL);
13117
13118 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013119}
13120
/*
 * bnx2x_remove_one - PCI remove callback; mirror of bnx2x_init_one().
 *
 * Tears down in reverse order of probe: netdev unregistration first
 * (stops new traffic/ioctls), then firmware resources, BAR mappings,
 * the netdev itself and finally the PCI resources.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	/* Firmware image and the init arrays derived from it */
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* Only the last user of the device releases the PCI regions */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
13156
13157static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13158{
13159 struct net_device *dev = pci_get_drvdata(pdev);
Eliezer Tamir228241e2008-02-28 11:56:57 -080013160 struct bnx2x *bp;
13161
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013162 if (!dev) {
Joe Perches7995c642010-02-17 15:01:52 +000013163 pr_err("BAD net device from bnx2x_init_one\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013164 return -ENODEV;
13165 }
Eliezer Tamir228241e2008-02-28 11:56:57 -080013166 bp = netdev_priv(dev);
13167
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013168 rtnl_lock();
13169
13170 pci_save_state(pdev);
13171
13172 if (!netif_running(dev)) {
13173 rtnl_unlock();
13174 return 0;
13175 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013176
13177 netif_device_detach(dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013178
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -070013179 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013180
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013181 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
Eliezer Tamir228241e2008-02-28 11:56:57 -080013182
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013183 rtnl_unlock();
13184
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013185 return 0;
13186}
13187
13188static int bnx2x_resume(struct pci_dev *pdev)
13189{
13190 struct net_device *dev = pci_get_drvdata(pdev);
Eliezer Tamir228241e2008-02-28 11:56:57 -080013191 struct bnx2x *bp;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013192 int rc;
13193
Eliezer Tamir228241e2008-02-28 11:56:57 -080013194 if (!dev) {
Joe Perches7995c642010-02-17 15:01:52 +000013195 pr_err("BAD net device from bnx2x_init_one\n");
Eliezer Tamir228241e2008-02-28 11:56:57 -080013196 return -ENODEV;
13197 }
Eliezer Tamir228241e2008-02-28 11:56:57 -080013198 bp = netdev_priv(dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013199
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +000013200 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13201 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13202 return -EAGAIN;
13203 }
13204
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013205 rtnl_lock();
13206
Eliezer Tamir228241e2008-02-28 11:56:57 -080013207 pci_restore_state(pdev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013208
13209 if (!netif_running(dev)) {
13210 rtnl_unlock();
13211 return 0;
13212 }
13213
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013214 bnx2x_set_power_state(bp, PCI_D0);
13215 netif_device_attach(dev);
13216
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -070013217 rc = bnx2x_nic_load(bp, LOAD_OPEN);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013218
Eilon Greenstein34f80b02008-06-23 20:33:01 -070013219 rtnl_unlock();
13220
13221 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013222}
13223
/*
 * bnx2x_eeh_nic_unload - minimal unload used from the PCI error
 * (EEH/AER) path - see bnx2x_io_error_detected().
 *
 * Quiesces the driver (stops NAPI/interrupts, timer and statistics),
 * releases IRQs and frees all host-side rx/tx resources.  Always
 * returns 0.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* On E1 chips invalidate the MAC CAM entries in the slow-path
	 * mcast configuration buffer.
	 */
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
			bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
13263
13264static void bnx2x_eeh_recover(struct bnx2x *bp)
13265{
13266 u32 val;
13267
13268 mutex_init(&bp->port.phy_mutex);
13269
13270 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
13271 bp->link_params.shmem_base = bp->common.shmem_base;
13272 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
13273
13274 if (!bp->common.shmem_base ||
13275 (bp->common.shmem_base < 0xA0000) ||
13276 (bp->common.shmem_base >= 0xC0000)) {
13277 BNX2X_DEV_INFO("MCP not active\n");
13278 bp->flags |= NO_MCP_FLAG;
13279 return;
13280 }
13281
13282 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
13283 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13284 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13285 BNX2X_ERR("BAD MCP validity signature\n");
13286
13287 if (!BP_NOMCP(bp)) {
13288 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
13289 & DRV_MSG_SEQ_NUMBER_MASK);
13290 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
13291 }
13292}
13293
Wendy Xiong493adb12008-06-23 20:36:22 -070013294/**
13295 * bnx2x_io_error_detected - called when PCI error is detected
13296 * @pdev: Pointer to PCI device
13297 * @state: The current pci connection state
13298 *
13299 * This function is called after a PCI bus error affecting
13300 * this device has been detected.
13301 */
13302static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13303 pci_channel_state_t state)
13304{
13305 struct net_device *dev = pci_get_drvdata(pdev);
13306 struct bnx2x *bp = netdev_priv(dev);
13307
13308 rtnl_lock();
13309
13310 netif_device_detach(dev);
13311
Dean Nelson07ce50e2009-07-31 09:13:25 +000013312 if (state == pci_channel_io_perm_failure) {
13313 rtnl_unlock();
13314 return PCI_ERS_RESULT_DISCONNECT;
13315 }
13316
Wendy Xiong493adb12008-06-23 20:36:22 -070013317 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070013318 bnx2x_eeh_nic_unload(bp);
Wendy Xiong493adb12008-06-23 20:36:22 -070013319
13320 pci_disable_device(pdev);
13321
13322 rtnl_unlock();
13323
13324 /* Request a slot reset */
13325 return PCI_ERS_RESULT_NEED_RESET;
13326}
13327
13328/**
13329 * bnx2x_io_slot_reset - called after the PCI bus has been reset
13330 * @pdev: Pointer to PCI device
13331 *
13332 * Restart the card from scratch, as if from a cold-boot.
13333 */
13334static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13335{
13336 struct net_device *dev = pci_get_drvdata(pdev);
13337 struct bnx2x *bp = netdev_priv(dev);
13338
13339 rtnl_lock();
13340
13341 if (pci_enable_device(pdev)) {
13342 dev_err(&pdev->dev,
13343 "Cannot re-enable PCI device after reset\n");
13344 rtnl_unlock();
13345 return PCI_ERS_RESULT_DISCONNECT;
13346 }
13347
13348 pci_set_master(pdev);
13349 pci_restore_state(pdev);
13350
13351 if (netif_running(dev))
13352 bnx2x_set_power_state(bp, PCI_D0);
13353
13354 rtnl_unlock();
13355
13356 return PCI_ERS_RESULT_RECOVERED;
13357}
13358
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.  Re-syncs with the MCP, reloads
 * the NIC if it was running and re-attaches the netdev.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	/* Don't race with the driver's own parity-error recovery flow */
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
13387
/* PCI error-recovery (AER/EEH) callbacks invoked by the PCI core */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};
13393
/* PCI driver glue: probe/remove, legacy PM hooks and error recovery */
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
13403
13404static int __init bnx2x_init(void)
13405{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000013406 int ret;
13407
Joe Perches7995c642010-02-17 15:01:52 +000013408 pr_info("%s", version);
Eilon Greenstein938cf542009-08-12 08:23:37 +000013409
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080013410 bnx2x_wq = create_singlethread_workqueue("bnx2x");
13411 if (bnx2x_wq == NULL) {
Joe Perches7995c642010-02-17 15:01:52 +000013412 pr_err("Cannot create workqueue\n");
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080013413 return -ENOMEM;
13414 }
13415
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000013416 ret = pci_register_driver(&bnx2x_pci_driver);
13417 if (ret) {
Joe Perches7995c642010-02-17 15:01:52 +000013418 pr_err("Cannot register driver\n");
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000013419 destroy_workqueue(bnx2x_wq);
13420 }
13421 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020013422}
13423
/* Module exit: detach from the PCI core, then drop the workqueue. */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	/* Safe once all devices have been removed */
	destroy_workqueue(bnx2x_wq);
}
13430
/* Module entry/exit hooks */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
13433
Michael Chan993ac7b2009-10-10 13:46:56 +000013434#ifdef BCM_CNIC
13435
13436/* count denotes the number of new completions we have seen */
13437static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13438{
13439 struct eth_spe *spe;
13440
13441#ifdef BNX2X_STOP_ON_ERROR
13442 if (unlikely(bp->panic))
13443 return;
13444#endif
13445
13446 spin_lock_bh(&bp->spq_lock);
13447 bp->cnic_spq_pending -= count;
13448
13449 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
13450 bp->cnic_spq_pending++) {
13451
13452 if (!bp->cnic_kwq_pending)
13453 break;
13454
13455 spe = bnx2x_sp_get_next(bp);
13456 *spe = *bp->cnic_kwq_cons;
13457
13458 bp->cnic_kwq_pending--;
13459
13460 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
13461 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13462
13463 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13464 bp->cnic_kwq_cons = bp->cnic_kwq;
13465 else
13466 bp->cnic_kwq_cons++;
13467 }
13468 bnx2x_sp_prod_update(bp);
13469 spin_unlock_bh(&bp->spq_lock);
13470}
13471
13472static int bnx2x_cnic_sp_queue(struct net_device *dev,
13473 struct kwqe_16 *kwqes[], u32 count)
13474{
13475 struct bnx2x *bp = netdev_priv(dev);
13476 int i;
13477
13478#ifdef BNX2X_STOP_ON_ERROR
13479 if (unlikely(bp->panic))
13480 return -EIO;
13481#endif
13482
13483 spin_lock_bh(&bp->spq_lock);
13484
13485 for (i = 0; i < count; i++) {
13486 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
13487
13488 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
13489 break;
13490
13491 *bp->cnic_kwq_prod = *spe;
13492
13493 bp->cnic_kwq_pending++;
13494
13495 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
13496 spe->hdr.conn_and_cmd_data, spe->hdr.type,
13497 spe->data.mac_config_addr.hi,
13498 spe->data.mac_config_addr.lo,
13499 bp->cnic_kwq_pending);
13500
13501 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
13502 bp->cnic_kwq_prod = bp->cnic_kwq;
13503 else
13504 bp->cnic_kwq_prod++;
13505 }
13506
13507 spin_unlock_bh(&bp->spq_lock);
13508
13509 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
13510 bnx2x_cnic_sp_post(bp, 0);
13511
13512 return i;
13513}
13514
13515static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13516{
13517 struct cnic_ops *c_ops;
13518 int rc = 0;
13519
13520 mutex_lock(&bp->cnic_mutex);
13521 c_ops = bp->cnic_ops;
13522 if (c_ops)
13523 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13524 mutex_unlock(&bp->cnic_mutex);
13525
13526 return rc;
13527}
13528
13529static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13530{
13531 struct cnic_ops *c_ops;
13532 int rc = 0;
13533
13534 rcu_read_lock();
13535 c_ops = rcu_dereference(bp->cnic_ops);
13536 if (c_ops)
13537 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13538 rcu_read_unlock();
13539
13540 return rc;
13541}
13542
13543/*
13544 * for commands that have no data
13545 */
13546static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13547{
13548 struct cnic_ctl_info ctl = {0};
13549
13550 ctl.cmd = cmd;
13551
13552 return bnx2x_cnic_ctl_send(bp, &ctl);
13553}
13554
13555static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
13556{
13557 struct cnic_ctl_info ctl;
13558
13559 /* first we tell CNIC and only then we count this as a completion */
13560 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13561 ctl.data.comp.cid = cid;
13562
13563 bnx2x_cnic_ctl_send_bh(bp, &ctl);
13564 bnx2x_cnic_sp_post(bp, 1);
13565}
13566
13567static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13568{
13569 struct bnx2x *bp = netdev_priv(dev);
13570 int rc = 0;
13571
13572 switch (ctl->cmd) {
13573 case DRV_CTL_CTXTBL_WR_CMD: {
13574 u32 index = ctl->data.io.offset;
13575 dma_addr_t addr = ctl->data.io.dma_addr;
13576
13577 bnx2x_ilt_wr(bp, index, addr);
13578 break;
13579 }
13580
13581 case DRV_CTL_COMPLETION_CMD: {
13582 int count = ctl->data.comp.comp_count;
13583
13584 bnx2x_cnic_sp_post(bp, count);
13585 break;
13586 }
13587
13588 /* rtnl_lock is held. */
13589 case DRV_CTL_START_L2_CMD: {
13590 u32 cli = ctl->data.ring.client_id;
13591
13592 bp->rx_mode_cl_mask |= (1 << cli);
13593 bnx2x_set_storm_rx_mode(bp);
13594 break;
13595 }
13596
13597 /* rtnl_lock is held. */
13598 case DRV_CTL_STOP_L2_CMD: {
13599 u32 cli = ctl->data.ring.client_id;
13600
13601 bp->rx_mode_cl_mask &= ~(1 << cli);
13602 bnx2x_set_storm_rx_mode(bp);
13603 break;
13604 }
13605
13606 default:
13607 BNX2X_ERR("unknown command %x\n", ctl->cmd);
13608 rc = -EINVAL;
13609 }
13610
13611 return rc;
13612}
13613
13614static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
13615{
13616 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13617
13618 if (bp->flags & USING_MSIX_FLAG) {
13619 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
13620 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
13621 cp->irq_arr[0].vector = bp->msix_table[1].vector;
13622 } else {
13623 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
13624 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
13625 }
13626 cp->irq_arr[0].status_blk = bp->cnic_sb;
13627 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
13628 cp->irq_arr[1].status_blk = bp->def_status_blk;
13629 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
13630
13631 cp->num_irq = 2;
13632}
13633
/*
 * bnx2x_register_cnic - attach a CNIC offload driver.
 * @dev:  bnx2x net device
 * @ops:  callbacks supplied by the CNIC driver (must not be NULL)
 * @data: opaque cookie passed back through the callbacks
 *
 * Allocates the KWQE staging ring, initializes the CNIC status block
 * and IRQ info, then publishes @ops last via rcu_assign_pointer() so
 * RCU readers (bnx2x_cnic_ctl_send_bh) only ever observe a fully
 * initialized state.
 *
 * Returns 0, -EINVAL for NULL @ops, -EBUSY while intr_sem is raised,
 * or -ENOMEM if the ring allocation fails.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* NOTE(review): non-zero intr_sem appears to mean interrupt
	 * handling is gated - refuse registration until it drops.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	/* One page of staging descriptors for CNIC KWQEs */
	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* Publish last: everything above must be visible to readers */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
13671
/*
 * bnx2x_unregister_cnic - detach the CNIC driver registered via
 * bnx2x_register_cnic().
 *
 * Clears the iSCSI MAC if it was set, unpublishes the ops pointer and
 * waits for in-flight RCU readers before freeing the staging ring.
 * Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* Let all RCU readers of cnic_ops drain before freeing the ring */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
13691
/*
 * bnx2x_cnic_probe - export this device's CNIC attachment point.
 *
 * Fills in the cnic_eth_dev descriptor embedded in the bnx2x instance
 * (chip id, BAR mappings, context-table geometry and the driver
 * callbacks) and returns it.  The structure is owned by the bnx2x
 * instance; exported for use by the CNIC module.
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;	/* mapped register BAR */
	cp->io_base2 = bp->doorbells;	/* mapped doorbell BAR */
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
13715
13716#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070013717