blob: 306c2b8165e242c8400d6b416de9cc201540484a [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Eilon Greensteind05c26c2009-01-17 23:26:13 -08003 * Copyright (c) 2007-2009 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080041#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070045#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020046#include <linux/workqueue.h>
47#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070048#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020049#include <linux/prefetch.h>
50#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020051#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000052#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020053
Eilon Greenstein359d8b12009-02-12 08:38:25 +000054
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020055#include "bnx2x.h"
56#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070057#include "bnx2x_init_ops.h"
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000058#include "bnx2x_dump.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020059
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000060#define DRV_MODULE_VERSION "1.52.1-5"
Eilon Greenstein0ab365f2009-11-09 06:09:37 +000061#define DRV_MODULE_RELDATE "2009/11/09"
Eilon Greenstein34f80b02008-06-23 20:33:01 -070062#define BNX2X_BC_VER 0x040200
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020063
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070064#include <linux/firmware.h>
65#include "bnx2x_fw_file_hdr.h"
66/* FW files */
Ben Hutchings45229b42009-11-07 11:53:39 +000067#define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070074
Eilon Greenstein34f80b02008-06-23 20:33:01 -070075/* Time in jiffies before concluding the transmitter is hung */
76#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020077
/* Banner printed once at probe time */
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Request firmware images by name so initramfs tooling picks them up */
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

/* Module parameters (all read-only after load: perm bits are 0) */

/* multi-queue RSS mode: 0 - single queue, 1 - multi queue (default) */
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

/* number of RSS queues; 0 (default) means "one per CPU" per the desc below */
static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is as a number of CPUs)");

/* non-zero disables TPA (HW LRO aggregation) */
static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

/* force interrupt mode instead of auto-selection: 1 - INTx, 2 - MSI */
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

/* non-zero enables pause-on-exhausted-host-ring (dropless) flow control */
static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

/* debug aid: poll status blocks from a timer instead of interrupts */
static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

/* debug aid: force PCIe Max Read Request Size; -1 means leave default */
static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

/* initial netif debug message level (msglevel) */
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

/* driver-private workqueue for slowpath work items */
static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200126
/* Supported chip flavors; values are used as indices into board_info[]
 * and as driver_data in the PCI ID table below.
 */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


/* PCI IDs this driver binds to; driver_data carries the board type */
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
151
152/****************************************************************************
153* General service functions
154****************************************************************************/
155
156/* used only at init
157 * locking is done by mcp
158 */
Eilon Greenstein573f2032009-08-12 08:24:14 +0000159void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200160{
161 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
162 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
163 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
164 PCICFG_VENDOR_ID_OFFSET);
165}
166
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200167static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
168{
169 u32 val;
170
171 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
172 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
173 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
174 PCICFG_VENDOR_ID_OFFSET);
175
176 return val;
177}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200178
/* GO registers of the 16 DMAE command channels, indexed by channel number
 * (the idx argument of bnx2x_post_dmae below).
 */
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
185
186/* copy command into DMAE command memory and set DMAE command go */
187static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
188 int idx)
189{
190 u32 cmd_offset;
191 int i;
192
193 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
194 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
195 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
196
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700197 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
198 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200199 }
200 REG_WR(bp, dmae_reg_go_c[idx], 1);
201}
202
/* Write len32 dwords from the DMA-able host buffer at dma_addr to GRC
 * offset dst_addr using the DMAE engine.  Falls back to indirect register
 * writes while the DMAE is not yet initialized.  Serialized against other
 * DMAE users by bp->dmae_mutex; polls the write-back completion word
 * (up to 200 iterations) and logs an error on timeout.
 */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;	/* poll budget for the completion word */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* PCI -> GRC copy, completion written back to host memory */
	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	/* GRC destination is given to the DMAE in dword units */
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	/* clear the completion word before posting the command */
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* wait for the engine to write DMAE_COMP_VAL back to host memory */
	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
276
/* Read len32 dwords from GRC offset src_addr into the slowpath wb_data
 * buffer using the DMAE engine (mirror image of bnx2x_write_dmae).  Falls
 * back to indirect register reads while the DMAE is not yet initialized.
 * Serialized by bp->dmae_mutex; polls the completion word (up to 200
 * iterations) and logs an error on timeout.
 */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;	/* poll budget for the completion word */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* GRC -> PCI copy, completion written back to host memory */
	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	/* GRC source is given to the DMAE in dword units */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	/* clear the landing area and the completion word before posting */
	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* wait for the engine to write DMAE_COMP_VAL back to host memory */
	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200351
Eilon Greenstein573f2032009-08-12 08:24:14 +0000352void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
353 u32 addr, u32 len)
354{
355 int offset = 0;
356
357 while (len > DMAE_LEN32_WR_MAX) {
358 bnx2x_write_dmae(bp, phys_addr + offset,
359 addr + offset, DMAE_LEN32_WR_MAX);
360 offset += DMAE_LEN32_WR_MAX * 4;
361 len -= DMAE_LEN32_WR_MAX;
362 }
363
364 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
365}
366
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700367/* used only for slowpath so not inlined */
368static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
369{
370 u32 wb_write[2];
371
372 wb_write[0] = val_hi;
373 wb_write[1] = val_lo;
374 REG_WR_DMAE(bp, reg, wb_write, 2);
375}
376
#ifdef USE_WB_RD
/* Read a 64-bit write-back register pair and fold it into one u64 */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);
	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
387
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200388static int bnx2x_mc_assert(struct bnx2x *bp)
389{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200390 char last_idx;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700391 int i, rc = 0;
392 u32 row0, row1, row2, row3;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200393
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700394 /* XSTORM */
395 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
396 XSTORM_ASSERT_LIST_INDEX_OFFSET);
397 if (last_idx)
398 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200399
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700400 /* print the asserts */
401 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200402
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700403 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
404 XSTORM_ASSERT_LIST_OFFSET(i));
405 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
407 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
409 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200411
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700412 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
413 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
414 " 0x%08x 0x%08x 0x%08x\n",
415 i, row3, row2, row1, row0);
416 rc++;
417 } else {
418 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200419 }
420 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700421
422 /* TSTORM */
423 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
424 TSTORM_ASSERT_LIST_INDEX_OFFSET);
425 if (last_idx)
426 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
427
428 /* print the asserts */
429 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
430
431 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
432 TSTORM_ASSERT_LIST_OFFSET(i));
433 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
435 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
437 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
439
440 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
441 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
442 " 0x%08x 0x%08x 0x%08x\n",
443 i, row3, row2, row1, row0);
444 rc++;
445 } else {
446 break;
447 }
448 }
449
450 /* CSTORM */
451 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
452 CSTORM_ASSERT_LIST_INDEX_OFFSET);
453 if (last_idx)
454 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
455
456 /* print the asserts */
457 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
458
459 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
460 CSTORM_ASSERT_LIST_OFFSET(i));
461 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
463 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
465 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
467
468 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
469 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
470 " 0x%08x 0x%08x 0x%08x\n",
471 i, row3, row2, row1, row0);
472 rc++;
473 } else {
474 break;
475 }
476 }
477
478 /* USTORM */
479 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
480 USTORM_ASSERT_LIST_INDEX_OFFSET);
481 if (last_idx)
482 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
483
484 /* print the asserts */
485 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
486
487 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
488 USTORM_ASSERT_LIST_OFFSET(i));
489 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i) + 4);
491 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
492 USTORM_ASSERT_LIST_OFFSET(i) + 8);
493 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
494 USTORM_ASSERT_LIST_OFFSET(i) + 12);
495
496 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
497 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
498 " 0x%08x 0x%08x 0x%08x\n",
499 i, row3, row2, row1, row0);
500 rc++;
501 } else {
502 break;
503 }
504 }
505
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200506 return rc;
507}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800508
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200509static void bnx2x_fw_dump(struct bnx2x *bp)
510{
511 u32 mark, offset;
Eilon Greenstein4781bfa2009-02-12 08:38:17 +0000512 __be32 data[9];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200513 int word;
514
515 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
Eliezer Tamir49d66772008-02-28 11:53:13 -0800516 mark = ((mark + 0x3) & ~0x3);
Joe Perchesad361c92009-07-06 13:05:40 -0700517 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200518
Joe Perchesad361c92009-07-06 13:05:40 -0700519 printk(KERN_ERR PFX);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200520 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
521 for (word = 0; word < 8; word++)
522 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
523 offset + 4*word));
524 data[8] = 0x0;
Eliezer Tamir49d66772008-02-28 11:53:13 -0800525 printk(KERN_CONT "%s", (char *)data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200526 }
527 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
528 for (word = 0; word < 8; word++)
529 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
530 offset + 4*word));
531 data[8] = 0x0;
Eliezer Tamir49d66772008-02-28 11:53:13 -0800532 printk(KERN_CONT "%s", (char *)data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200533 }
Joe Perchesad361c92009-07-06 13:05:40 -0700534 printk(KERN_ERR PFX "end of fw dump\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200535}
536
/* Emergency diagnostic dump: disables statistics, then prints the common
 * and per-queue indices, walks the Rx/SGE/RCQ and Tx rings around their
 * current consumer/producer positions, and finishes with the firmware
 * trace (bnx2x_fw_dump) and storm assert lists (bnx2x_mc_assert).
 * Called on fatal driver errors; output goes to the kernel log.
 */
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	/* stop the statistics state machine so it does not race the dump */
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* dump a window of Rx BDs around the consumer index */
		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		/* dump the SGE ring between producer and last max SGE */
		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		/* dump a window of the Rx completion queue */
		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* dump the sw Tx packet bookkeeping around the consumer */
		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		/* dump the hw Tx BD ring around the BD consumer */
		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
648
/* Enable host-coalescing interrupts for this port in the selected mode
 * (MSI-X, MSI, or INTx), then program the leading/trailing edge attention
 * registers on E1H chips.  The mmiowb()/barrier() calls enforce write
 * ordering between the HC config write and the edge-register writes.
 */
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		/* MSI-X: disable single-ISR and the INTx line */
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		/* MSI: single ISR, no INTx line */
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		/* INTx: enable everything first ... */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		/* ... then drop the MSI/MSI-X enable for the final write */
		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			/* in multi-function mode, unmask this VN's bits */
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
708
/* Disable all HC interrupt sources (MSI-X, MSI, INTx and attention bits)
 * for this port, then read the register back to verify the write landed.
 */
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* clear every interrupt-enable bit in one shot */
	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	/* read-back check: the disable must actually reach the IGU */
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
730
/* Quiesce interrupt processing: block the software ISR path via intr_sem,
 * optionally disable interrupts in HW, then wait for every in-flight ISR
 * and the slow-path work item to finish.
 *
 * @disable_hw: when non-zero, also program the HC to stop generating
 *              interrupts (bnx2x_int_disable).
 */
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is the slow-path/default vector */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		/* with CNIC an extra vector sits before the queue vectors */
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
760
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700761/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200762
763/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700764 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200765 */
766
/* Acknowledge a status block to the IGU.
 *
 * Builds an igu_ack_register from the status-block id, storm id, index and
 * op/update flags, and writes it as one u32 to the per-port INT_ACK command
 * register.  The trailing mmiowb()/barrier() keep the ack ordered before
 * any later MMIO.
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	/* the whole struct is written as a single 32-bit register value */
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
789
/* Refresh the fastpath's cached C-storm and U-storm status-block indices
 * from the chip-written status block.
 */
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}
798
/* Read (and thereby acknowledge) the pending-interrupt SIMD mask from the
 * per-port HC command register; returns the raw 32-bit mask value.
 */
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
810
811
812/*
813 * fast path service functions
814 */
815
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -0800816static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
817{
818 /* Tell compiler that consumer and producer can change */
819 barrier();
820 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
Eilon Greenstein237907c2009-01-14 06:42:44 +0000821}
822
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 *
 * Walks the chain of buffer descriptors belonging to one transmitted
 * packet: unmaps the start BD, skips the parse BD and (for TSO) the split
 * header BD - neither of which carries a DMA mapping - then unmaps every
 * fragment BD and finally frees the skb.  Returns the new BD-ring consumer
 * index (first BD past this packet).
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	/* nbd counts the BDs after the start BD */
	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
889
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700890static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200891{
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700892 s16 used;
893 u16 prod;
894 u16 cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200895
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700896 barrier(); /* Tell compiler that prod and cons can change */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200897 prod = fp->tx_bd_prod;
898 cons = fp->tx_bd_cons;
899
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700900 /* NUM_TX_RINGS = number of "next-page" entries
901 It will be used as a threshold */
902 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200903
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700904#ifdef BNX2X_STOP_ON_ERROR
Ilpo Järvinen53e5e962008-07-25 21:40:45 -0700905 WARN_ON(used < 0);
906 WARN_ON(used > fp->bp->tx_ring_size);
907 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700908#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200909
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700910 return (s16)(fp->bp->tx_ring_size) - used;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200911}
912
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000913static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
914{
915 u16 hw_cons;
916
917 /* Tell compiler that status block fields can change */
918 barrier();
919 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
920 return hw_cons != fp->tx_pkt_cons;
921}
922
/* Tx completion processing for one fastpath.
 *
 * Frees every packet the HW has reported complete (driver consumer up to
 * the status-block consumer), publishes the new consumer indices, and
 * wakes the matching netdev Tx queue if it was stopped and enough BDs are
 * now free.  Returns 0, or -1 when panic stop-on-error is active.
 */
static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		/* bd_cons advances to just past the freed packet's BDs */
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		/* re-check under the barrier; only wake while the device is
		 * open and a max-fragment packet would now fit */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
	return 0;
}
978
Michael Chan993ac7b2009-10-10 13:46:56 +0000979#ifdef BCM_CNIC
980static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
981#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -0700982
/* Handle a slow-path (ramrod) completion CQE.
 *
 * Decodes the connection id and command from the ramrod CQE and advances
 * the corresponding state machine: per-fastpath state for non-leading
 * queues (fp->index != 0), global bp->state otherwise.  Each transition is
 * followed by mb() so bnx2x_wait_ramrod() polling the state sees it.
 */
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* a ramrod completed - a slow-path queue slot is free again */
	bp->spq_left++;

	if (fp->index) {
		/* non-leading queue: command + current fp state selects the
		 * transition */
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	/* leading queue: transitions are keyed on the global device state */
	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		/* pairs with the waiter reading set_mac_pending */
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
1065
/* Release one Rx SGE ring entry: unmap its DMA region, free the page(s)
 * and clear both the software entry and the hardware SGE descriptor.
 * "Next page" ring elements (page == NULL) are skipped.
 */
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	/* invalidate both the SW entry and the HW descriptor */
	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}
1085
/* Free the first @last entries of the fastpath's Rx SGE ring, one entry
 * at a time via bnx2x_free_rx_sge().
 */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int idx;

	for (idx = 0; idx < last; idx++)
		bnx2x_free_rx_sge(bp, fp, idx);
}
1094
/* Allocate and DMA-map a fresh page group for one Rx SGE ring entry and
 * publish its bus address in the hardware SGE descriptor.
 *
 * Returns 0 on success, -ENOMEM if page allocation or DMA mapping fails
 * (the page is freed again on a mapping failure).
 */
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* don't leak the page if the mapping failed */
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	/* hand the bus address to the HW descriptor */
	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
1121
/* Allocate and DMA-map a new Rx skb for ring slot @index and publish its
 * bus address in the hardware Rx BD.
 *
 * Returns 0 on success, -ENOMEM if skb allocation or DMA mapping fails
 * (the skb is freed again on a mapping failure).
 */
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* don't leak the skb if the mapping failed */
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* hand the bus address to the HW descriptor */
	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
1149
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	/* give the buffer back to the device; only the head that the CPU
	 * may have touched (up to RX_COPY_THRESH) needs syncing */
	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	/* move skb, its mapping cookie and the HW descriptor cons -> prod */
	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
1173
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001174static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1175 u16 idx)
1176{
1177 u16 last_max = fp->last_max_sge;
1178
1179 if (SUB_S16(idx, last_max) > 0)
1180 fp->last_max_sge = idx;
1181}
1182
1183static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1184{
1185 int i, j;
1186
1187 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1188 int idx = RX_SGE_CNT * i - 1;
1189
1190 for (j = 0; j < 2; j++) {
1191 SGE_MASK_CLEAR_BIT(fp, idx);
1192 idx--;
1193 }
1194 }
1195}
1196
/* Update the SGE producer after a TPA aggregation consumed SGE pages.
 *
 * Clears the mask bit of every SGE index listed in the CQE's SGL, tracks
 * the highest index seen, then advances rx_sge_prod over every fully
 * consumed mask element and re-arms those elements to all-ones.
 */
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	/* number of SGE entries used by the part of the packet that did not
	 * fit on the BD, rounded up to whole SGE pages */
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		/* stop at the first element that still has in-use entries */
		if (likely(fp->sge_mask[i]))
			break;

		/* element fully consumed - re-arm it and advance the prod */
		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
1249
/* Initialize the fastpath's SGE mask: every entry available (all ones),
 * except the per-page "next" elements which are cleared.
 */
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
1262
/* Begin a TPA (LRO) aggregation in bin @queue.
 *
 * Swaps buffers: the empty skb held in the TPA pool is mapped and placed
 * at the producer slot of the Rx ring, while the partially filled buffer
 * at the consumer slot is parked (still DMA-mapped) in the pool until
 * bnx2x_tpa_stop() completes the aggregation.
 */
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	/* debug bookkeeping of which TPA bins are active */
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
1301
/* Attach the SGE pages of a completed TPA aggregation to @skb as page
 * fragments.
 *
 * Walks the CQE's SGL, replaces each consumed ring page with a freshly
 * allocated one, unmaps the old page and appends it to the skb.  Sets
 * gso_size so the aggregated packet can still be forwarded/segmented.
 * Returns 0 on success or a negative errno if a replacement page cannot
 * be allocated (the packet is then dropped by the caller).
 */
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	/* bytes that did not fit on the BD and live in SGE pages */
	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		/* keep a copy before the ring entry is replaced */
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
1367
/* Complete a TPA aggregation in bin @queue and hand the packet to the
 * stack.
 *
 * Unmaps the pooled skb, fixes up its header (pad, length, protocol, IP
 * checksum - accounting for a non-offloaded VLAN tag when BCM_VLAN is
 * built in), attaches the SGE fragments via bnx2x_fill_frag_skb() and
 * delivers it.  A pre-allocated replacement skb refills the bin; if that
 * allocation failed the packet is dropped and the old buffer stays in the
 * bin.  The bin always ends in BNX2X_TPA_STOP state.
 */
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			/* recompute the IP checksum after aggregation */
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
1457
/* Publish new RX BD/CQE/SGE producer values to the firmware.
 *
 * Fills a ustorm_eth_rx_producers structure with the three producer
 * indices and copies it, one 32-bit word at a time, into the USTORM
 * internal memory slot for this port/client.  Must only be called after
 * the corresponding BDs/SGEs have been set up, which the wmb() below
 * enforces.
 */
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	/* Copy the producers structure word by word into USTORM memory */
	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
1492
/* Process up to @budget received packets on one fastpath RX queue.
 *
 * Walks the completion queue (RCQ) from the software consumer up to the
 * hardware consumer taken from the status block.  Each CQE is either a
 * slowpath event (forwarded to bnx2x_sp_event), a TPA start/stop
 * aggregation marker, or a regular RX packet that is unmapped (or copied
 * for small frames), checksum-validated and handed to the stack.
 * Returns the number of packets passed up (the NAPI work done).
 */
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	/* Snapshot ring indices; bd_prod_fw tracks the raw (non-masked)
	   producer that is reported back to the firmware at the end */
	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					/* Buffer moves into the TPA pool;
					   the BD is consumed without
					   producing a packet yet */
					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			/* Sync only the header area before inspecting it;
			   the full buffer is unmapped below if kept */
			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				/* Original buffer stays on the ring */
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				/* A replacement buffer was allocated, so the
				   current skb can be detached and passed up */
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				/* Recycle the buffer back onto the RX ring */
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		/* Strip the VLAN tag via the accel path when the parser
		   flagged a VLAN and HW VLAN RX is enabled */
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	/* Write back the updated ring indices */
	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
1726
/* MSI-X interrupt handler for a single fastpath queue.
 *
 * Acks the status block with interrupts disabled for this SB and
 * schedules the queue's NAPI context, which will do the actual RX/TX
 * work.  Always returns IRQ_HANDLED since MSI-X vectors are not shared.
 */
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	/* Ack the SB and keep its interrupt disabled until NAPI completes */
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->u_status_block.status_block_index);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
1756
/* Legacy INTx / MSI interrupt handler (single vector, possibly shared).
 *
 * Acks the IGU and decodes the returned status word: bit (0x2 << sb_id)
 * per fastpath queue (NAPI is scheduled for each set bit), the CNIC SB
 * bits when BCM_CNIC is built in, and bit 0x1 for slowpath work which is
 * deferred to the sp_task workqueue.  Returns IRQ_NONE if the device
 * reported no pending status (shared-interrupt case).
 */
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Peel off one status bit per fastpath queue */
	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	/* Dispatch CNIC (iSCSI/FCoE offload) events to the registered
	   handler under RCU protection */
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* Slowpath events are handled out of interrupt context */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
1828
1829/* end of fast path */
1830
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001831static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001832
1833/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001834
1835/*
1836 * General service functions
1837 */
1838
/* Acquire a hardware resource lock shared between driver instances.
 *
 * Each PCI function has its own DRIVER_CONTROL register pair; writing
 * the resource bit to the +4 (set) register attempts the acquisition
 * and reading it back confirms ownership.  Polls every 5ms for up to
 * 5 seconds.  Returns 0 on success, -EINVAL for an out-of-range
 * resource, -EEXIST if this function already holds the bit, or
 * -EAGAIN on timeout.
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Functions 0-5 and 6-7 use different control register banks */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
1883
/* Release a hardware resource lock taken by bnx2x_acquire_hw_lock().
 *
 * Writing the resource bit to the base DRIVER_CONTROL register clears
 * it.  Returns 0 on success, -EINVAL for an out-of-range resource, or
 * -EFAULT if the lock was not actually held.
 */
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Same register selection as in the acquire path */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
1917
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001918/* HW Lock for shared dual port PHYs */
/* Take the PHY mutex and, when the board requires it (shared dual-port
 * PHY), also the hardware MDIO lock.  Pairs with bnx2x_release_phy_lock().
 */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}
1926
/* Release the MDIO hardware lock (if taken) and then the PHY mutex —
 * reverse order of bnx2x_acquire_phy_lock().
 */
static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
1934
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001935int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1936{
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1943 u32 gpio_reg;
1944 int value;
1945
1946 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1947 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1948 return -EINVAL;
1949 }
1950
1951 /* read GPIO value */
1952 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1953
1954 /* get the requested pin value */
1955 if ((gpio_reg & gpio_mask) == gpio_mask)
1956 value = 1;
1957 else
1958 value = 0;
1959
1960 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1961
1962 return value;
1963}
1964
/* Drive or float one GPIO pin on the given port.
 *
 * mode selects OUTPUT_LOW, OUTPUT_HIGH or INPUT_HI_Z (float); any other
 * mode leaves the register value built from the read untouched before
 * the write-back.  The read-modify-write is serialized across driver
 * instances via the GPIO hardware lock.  Returns 0 on success or
 * -EINVAL for a GPIO number above MISC_REGISTERS_GPIO_3.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
2017
/* Set or clear the interrupt-output state of one GPIO pin.
 *
 * mode selects INT_OUTPUT_CLR or INT_OUTPUT_SET; other modes leave the
 * value read from MISC_REG_GPIO_INT untouched before write-back.  The
 * read-modify-write is serialized via the GPIO hardware lock.  Returns
 * 0 on success or -EINVAL for a GPIO number above MISC_REGISTERS_GPIO_3.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
2063
/* Drive or float one SPIO (shared port I/O) pin.
 *
 * Only pins SPIO_4..SPIO_7 are valid; mode selects OUTPUT_LOW,
 * OUTPUT_HIGH or INPUT_HI_Z (float).  The read-modify-write of
 * MISC_REG_SPIO is serialized via the SPIO hardware lock.  Returns 0
 * on success or -EINVAL for an out-of-range pin number.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
2109
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002110static void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002111{
Eilon Greensteinad33ea32009-01-14 21:24:57 -08002112 switch (bp->link_vars.ieee_fc &
2113 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002114 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002115 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002116 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002117 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002118
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002119 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002120 bp->port.advertising |= (ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002121 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002122 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002123
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002124 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002125 bp->port.advertising |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08002126 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002127
Eliezer Tamirf1410642008-02-28 11:51:50 -08002128 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002129 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002130 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002131 break;
2132 }
2133}
2134
/* Report the current link state to the log and update the carrier.
 *
 * When the function is disabled in multi-function mode (MF_FUNC_DIS)
 * or the link is down, the carrier is turned off and "Link is Down"
 * is printed.  Otherwise the speed (capped by the per-VN max rate in
 * E1H multi-function mode), duplex and flow-control state are printed
 * as a single continued printk line.
 */
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		line_speed = bp->link_vars.line_speed;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			/* In multi-function mode the reported speed is
			   limited by this VN's configured max bandwidth */
			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
		printk("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
2185
/*
 * First-time PHY/link bring-up during load.
 *
 * Picks the requested flow-control mode from the MTU, runs the PHY init
 * under the PHY lock (in loopback mode for LOAD_DIAG), and recomputes
 * the advertised flow control.  On emulation/FPGA (CHIP_REV_IS_SLOW)
 * the link may already be up here, so stats and the link report are
 * kicked immediately.
 *
 * Returns the bnx2x_phy_init() status.
 * NOTE(review): the return type is u8 but the no-MCP path returns
 * -EINVAL, which truncates to a non-zero byte - callers only appear to
 * test for non-zero; confirm before changing.
 */
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		/* On slow (emulation) chips link may be up already */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
2220
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002221static void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002222{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002223 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002224 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002225 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002226 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002227
Eilon Greenstein19680c42008-08-13 15:47:33 -07002228 bnx2x_calc_fc_adv(bp);
2229 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00002230 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002231}
2232
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002233static void bnx2x__link_reset(struct bnx2x *bp)
2234{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002235 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002236 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00002237 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002238 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002239 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00002240 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002241}
2242
/*
 * Run the link self-test under the PHY lock.
 * Returns the bnx2x_test_link() status (0 on success).
 */
static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002253
/*
 * Initialize the per-port rate-shaping and fairness contexts in
 * bp->cmng from the current line speed.
 *
 * NOTE(review): divides by bp->link_vars.line_speed - callers appear to
 * invoke this only when the link is up (non-zero speed); confirm.
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;	/* bytes per usec */
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
2288
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	/* Walk all virtual functions sharing this port */
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
2334
/*
 * Program the per-VN rate-shaping and fairness contexts for @func into
 * the XSTORM internal memory, based on the min/max bandwidth shares in
 * the shared-memory MF configuration.  Hidden functions get zero rates.
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		/* Shares are configured in units of 100 Mbps */
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	/* vn_weight_sum == 0 means fairness is disabled - leave the
	   fairness context zeroed */
	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
2396
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002397
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Re-read the link state from the PHY/MAC layer */
	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			/* Tell the USTORM firmware whether pause frames
			   may be generated */
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
2469
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002470static void bnx2x__link_status_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002471{
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002472 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002473 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002474
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002475 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2476
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002477 if (bp->link_vars.link_up)
2478 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2479 else
2480 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2481
Eilon Greenstein2691d512009-08-12 08:22:08 +00002482 bnx2x_calc_vn_weight_sum(bp);
2483
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002484 /* indicate link status */
2485 bnx2x_link_report(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002486}
2487
/*
 * Take over the Port Management Function (PMF) role: mark this
 * function as PMF, unmask NIG attentions for it in the HC edge
 * registers, and notify the statistics state machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2503
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002504/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002505
2506/* slow path */
2507
2508/*
2509 * General service functions
2510 */
2511
/* send the MCP a request, block until there is a reply */
/*
 * Writes (command | seq) to the driver mailbox and polls the firmware
 * mailbox until the sequence number is echoed back or ~5 seconds pass.
 * Serialized by bp->fw_mb_mutex.  Returns the FW response code masked
 * with FW_MSG_CODE_MASK, or 0 if the firmware never responded.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	/* poll slower on emulation/FPGA platforms */
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2550
2551static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
Michael Chane665bfd2009-10-10 13:46:54 +00002552static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002553static void bnx2x_set_rx_mode(struct net_device *dev);
2554
/*
 * Quiesce this E1H function after the management FW disabled it:
 * stop the TX queues, close the function's door in the NIG and drop
 * the carrier.
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	/* block incoming traffic for this function in the NIG */
	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
2565
/*
 * Re-enable this E1H function after the management FW enabled it:
 * reopen the function's door in the NIG and restart the TX queues.
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
2580
/*
 * Recompute the port and per-VN min/max bandwidth contexts after a
 * bandwidth-allocation change.  If this function is the PMF it also
 * pokes the other functions on the port and writes the new cmng
 * structure into XSTORM internal memory.
 */
static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}
2614
/*
 * Handle a Driver Configuration Change (DCC) notification from the
 * management firmware: enable/disable this PF and/or re-apply the
 * bandwidth allocation, then acknowledge the result to the MCP.
 * Bits handled here are cleared from dcc_event; any bit left over is
 * reported back as a failure.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}
2651
Michael Chan28912902009-10-10 13:46:53 +00002652/* must be called under the spq lock */
2653static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2654{
2655 struct eth_spe *next_spe = bp->spq_prod_bd;
2656
2657 if (bp->spq_prod_bd == bp->spq_last_bd) {
2658 bp->spq_prod_bd = bp->spq;
2659 bp->spq_prod_idx = 0;
2660 DP(NETIF_MSG_TIMER, "end of spq\n");
2661 } else {
2662 bp->spq_prod_bd++;
2663 bp->spq_prod_idx++;
2664 }
2665 return next_spe;
2666}
2667
/* must be called under the spq lock */
/*
 * Publish the slow-path queue producer index to the XSTORM firmware.
 */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	/* order the producer write before any subsequent MMIO (see
	   Documentation/memory-barriers) */
	mmiowb();
}
2680
/* the slow path queue is odd since completions arrive on the fastpath ring */
/*
 * Post one slow-path queue element (ramrod) carrying @command for
 * connection @cid with a 64-bit data payload split into
 * @data_hi/@data_lo.  @common marks a common (non per-connection)
 * ramrod.  Returns 0 on success, -EBUSY if the ring is full (and
 * panics), -EIO if the driver already panicked.
 */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2727
2728/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002729static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002730{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002731 u32 i, j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002732 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002733
2734 might_sleep();
2735 i = 100;
2736 for (j = 0; j < i*10; j++) {
2737 val = (1UL << 31);
2738 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2739 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2740 if (val & (1L << 31))
2741 break;
2742
2743 msleep(5);
2744 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002745 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002746 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002747 rc = -EBUSY;
2748 }
2749
2750 return rc;
2751}
2752
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002753/* release split MCP access lock register */
2754static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002755{
2756 u32 val = 0;
2757
2758 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2759}
2760
/*
 * Compare the cached default-status-block indices against the ones the
 * chip last wrote, update the cache, and return a bitmask of which
 * sections changed: 1=attention, 2=CSTORM, 4=USTORM, 8=XSTORM,
 * 16=TSTORM.  0 means nothing new.
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
2789
2790/*
2791 * slow path service functions
2792 */
2793
/*
 * Handle newly asserted attention bits: mask them in the AEU (under
 * the per-port HW lock), record them in bp->attn_state, service the
 * hard-wired sources (NIG/link, SW timer, GPIOs, general attentions)
 * and finally acknowledge the bits to the HC.
 *
 * For a NIG attention the NIG interrupt mask is saved and zeroed
 * around bnx2x_link_attn() and restored only after the HC ack, with
 * the PHY lock held across the whole window.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	/* a bit should never be asserted twice without a deassert */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	/* mask the newly asserted bits in the AEU */
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1;
		   clear the corresponding latch */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2889
/*
 * Record a fan failure: persist the FAILURE external-PHY type in
 * shared memory (so it survives a driver reload) and log a warning
 * that the card is being shut down.
 */
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage. Please contact Dell Support for assistance\n",
	       bp->dev->name);
}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002906
/* Handle the attention sources routed to AEU output group 0:
 * SPIO5 (fan failure), GPIO3 (module detect interrupt) and the fatal
 * HW block attentions of set 0.  Handled sources are first masked off
 * in the per-port AEU enable register, then processed.
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* mask SPIO5 in the AEU enable register before handling */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		/* shut the card down and report it (see above) */
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		/* module detect interrupt - handled under the PHY lock */
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the fatal sources, then bring the driver down */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
2970
/* Handle the attention sources routed to AEU output group 1:
 * doorbell queue (DORQ) errors and the fatal HW block attentions of
 * set 1 (the latter are masked in the AEU and trigger a panic).
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* NOTE(review): register name suggests read-to-clear -
		   confirm against the HW spec */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the fatal sources, then bring the driver down */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
3001
/* Handle the attention sources routed to AEU output group 2:
 * CFC and PXP block errors, plus the fatal HW block attentions of
 * set 2 (masked in the AEU, then panic).
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the fatal sources, then bring the driver down */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
3041
/* Handle the attention sources routed to AEU output group 3:
 * general attentions (PMF link events, MC/MCP asserts) and the
 * latched attentions (GRC timeouts etc.), which are cleared at the
 * end via MISC_REG_AEU_CLR_LATCH_SIGNAL.
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* clear this function's general attention bit */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* re-read the MF config and the FW driver status */
			bp->mf_config = SHMEM_RD(bp,
					   mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			/* take over PMF duty if the MCP assigned it to us */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			/* storm processor assert - clear bits and panic */
			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			/* management CPU assert - dump its FW state */
			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* the GRC timeout info register exists on E1H only */
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* clear all latched attentions */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3096
/* Process newly deasserted attention lines: read the AEU after-invert
 * registers under the ALR lock, dispatch each deasserted group to the
 * per-output handlers, clear the bits in the HC, then re-enable the
 * lines in the AEU mask register and update the cached attn_state.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			/* only the sources enabled for this group matter */
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	/* clear the handled attention bits in the HC */
	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* re-enable the deasserted lines in the AEU mask (read-modify-write
	   under the per-port attention-mask HW lock) */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3175
/* Compare the attention bits reported in the default status block
 * against the acked bits and the driver's cached attn_state to detect
 * newly asserted and newly deasserted attention lines, and dispatch
 * each set to its handler.
 */
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	/* a bit may not differ from both the ack and the driver state */
	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
3203
/* Slowpath work item (queued on bnx2x_wq by the slowpath interrupt):
 * processes HW attentions signalled through the default status block,
 * then acks all default SB indices, re-enabling the slowpath
 * interrupt with the final (TSTORM) ack.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;


	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* ack the default status block indices; only the last ack
	   re-enables the interrupt */
	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}
3238
/* MSI-X slowpath interrupt handler: acks the default SB with
 * IGU_INT_DISABLE (no re-enable until bnx2x_sp_task has run), gives
 * CNIC a chance to handle the event when built in, and defers the
 * actual slowpath processing to the workqueue.
 */
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* cnic_ops may be swapped/cleared concurrently - RCU */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3272
3273/* end of slow path */
3274
3275/* Statistics */
3276
3277/****************************************************************************
3278* Macros
3279****************************************************************************/
3280
/* sum[hi:lo] += add[hi:lo]
 * 64-bit addition on values split into two 32-bit halves: add the low
 * halves first, propagate the carry, then add the high halves.
 */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		if (s_lo < a_lo) /* low half wrapped -> carry */ \
			s_hi++; \
		s_hi += a_hi; \
	} while (0)
3287
/* difference = minuend - subtrahend
 * 64-bit subtraction on hi/lo split pairs, clamping the result to
 * zero when the minuend is smaller than the subtrahend instead of
 * wrapping.
 * NOTE(review): with unsigned (u32) arguments the "d_hi > 0" test is
 * false only when m_hi == s_hi, so the clamp does not cover the case
 * m_hi < s_hi && m_lo < s_lo - there the result wraps; confirm this
 * is acceptable for the statistics counters this serves.
 */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
3315
/* Update a 64-bit MAC statistic: compute the delta of hw counter 's'
 * (hi/lo pair in the freshly read block 'new') against the previous
 * raw snapshot kept in pstats->mac_stx[0].t, store the new snapshot,
 * and accumulate the delta into the running total mac_stx[1].t.
 * Relies on 'diff', 'new' and 'pstats' being in scope at expansion.
 */
#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)
3325
/* Accumulate the change of NIG statistic 's' (hi/lo pair, 'new' vs
 * 'old' snapshots) into the estats field 't'.  Relies on 'diff',
 * 'new', 'old' and 'estats' being in scope at expansion.
 */
#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)
3333
/* sum[hi:lo] += add
 * Add a 32-bit quantity into a 64-bit accumulator kept as a hi/lo
 * pair, carrying into the high half when the low half wraps.
 */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		if (s_lo < a) \
			s_hi++; \
	} while (0)
3340
/* Add the 32-bit hw counter 'new->s' into the 64-bit accumulator of
 * the same name in pstats->mac_stx[1].  Relies on 'new' and 'pstats'
 * being in scope at expansion.
 */
#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)
3347
/* Fold the change of the little-endian TSTORM client counter 's'
 * since the last snapshot into 64-bit qstats field 't', refreshing
 * the snapshot.  Relies on 'diff', 'tclient', 'old_tclient' and
 * 'qstats' being in scope at expansion.
 */
#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3354
/* Same as UPDATE_EXTEND_TSTAT but for the USTORM client counters
 * ('uclient' / 'old_uclient').
 */
#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3361
/* Same as UPDATE_EXTEND_TSTAT but for the XSTORM client counters
 * ('xclient' / 'old_xclient').
 */
#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3368
/* minuend -= subtrahend
 * In-place 64-bit subtraction on hi/lo pairs; same underflow
 * semantics as DIFF_64.
 */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)
3374
/* minuend[hi:lo] -= subtrahend
 * Subtract a 32-bit value from a 64-bit hi/lo pair (subtrahend high
 * half is zero).
 */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)
3380
/* Subtract the change of the little-endian USTORM client counter 's'
 * since the last snapshot from 64-bit qstats field 't'.  Unlike
 * UPDATE_EXTEND_USTAT, the snapshot is NOT refreshed here.  Relies
 * on 'diff', 'uclient', 'old_uclient' and 'qstats' being in scope.
 */
#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3386
3387/*
3388 * General service functions
3389 */
3390
3391static inline long bnx2x_hilo(u32 *hiref)
3392{
3393 u32 lo = *(hiref + 1);
3394#if (BITS_PER_LONG == 64)
3395 u32 hi = *hiref;
3396
3397 return HILO_U64(hi, lo);
3398#else
3399 return lo;
3400#endif
3401}
3402
3403/*
3404 * Init service functions
3405 */
3406
/* Request updated storm statistics from the device by posting a
 * STAT_QUERY ramrod carrying an incrementing driver counter and a
 * bit-vector of the client IDs to collect.  No-op while a previous
 * query is still pending.
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq -
			   give the consumed credit back */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003428
/* Kick off the DMAE statistics transfers that were prepared in
 * bp->slowpath.  If commands were queued (executer_idx != 0) a
 * "loader" command is built that copies the prepared commands into
 * DMAE command memory and chain-starts them; otherwise, if only a
 * per-function stats address exists, the single prepared command is
 * posted directly.  Skipped on slow chip revisions.
 */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* pre-mark completion word; the transfer will reset/set it */
	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		/* copy the queued commands into DMAE command memory,
		   right after this loader command's slot */
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		/* completion triggers the copied command */
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
3476
/* Wait for the posted DMAE statistics transfer to complete by polling
 * the stats_comp word (sleeping 1 ms between polls, up to 10 tries).
 * Logs an error on timeout; always returns 1.
 */
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}
3493
3494/*
3495 * Statistics service functions
3496 */
3497
/* On a PMF change, pull the port statistics already accumulated at
 * the shared port_stx area into this function's host buffer.  The
 * area is larger than one DMAE read can move, so it is transferred
 * in two chained commands, then the function waits for completion.
 * Only meaningful for an E1H multi-function PMF with a valid
 * port_stx address.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* common GRC->PCI read opcode; per-command completion bits are
	   OR-ed in below */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	/* first chunk: up to DMAE_LEN32_RD_MAX dwords, completion
	   chains to the next command */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second chunk: the remainder, completion writes DMAE_COMP_VAL
	   to the stats_comp word polled by bnx2x_stats_comp() */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3552
3553static void bnx2x_port_stats_init(struct bnx2x *bp)
3554{
3555 struct dmae_command *dmae;
3556 int port = BP_PORT(bp);
3557 int vn = BP_E1HVN(bp);
3558 u32 opcode;
3559 int loader_idx = PMF_DMAE_C(bp);
3560 u32 mac_addr;
3561 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3562
3563 /* sanity */
3564 if (!bp->link_vars.link_up || !bp->port.pmf) {
3565 BNX2X_ERR("BUG!\n");
3566 return;
3567 }
3568
3569 bp->executer_idx = 0;
3570
3571 /* MCP */
3572 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3573 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3574 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3575#ifdef __BIG_ENDIAN
3576 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3577#else
3578 DMAE_CMD_ENDIANITY_DW_SWAP |
3579#endif
3580 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3581 (vn << DMAE_CMD_E1HVN_SHIFT));
3582
3583 if (bp->port.port_stx) {
3584
3585 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3586 dmae->opcode = opcode;
3587 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3588 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3589 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3590 dmae->dst_addr_hi = 0;
3591 dmae->len = sizeof(struct host_port_stats) >> 2;
3592 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3593 dmae->comp_addr_hi = 0;
3594 dmae->comp_val = 1;
3595 }
3596
3597 if (bp->func_stx) {
3598
3599 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3600 dmae->opcode = opcode;
3601 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3602 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3603 dmae->dst_addr_lo = bp->func_stx >> 2;
3604 dmae->dst_addr_hi = 0;
3605 dmae->len = sizeof(struct host_func_stats) >> 2;
3606 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3607 dmae->comp_addr_hi = 0;
3608 dmae->comp_val = 1;
3609 }
3610
3611 /* MAC */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003612 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3613 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3614 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3615#ifdef __BIG_ENDIAN
3616 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3617#else
3618 DMAE_CMD_ENDIANITY_DW_SWAP |
3619#endif
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003620 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3621 (vn << DMAE_CMD_E1HVN_SHIFT));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003622
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07003623 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003624
3625 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3626 NIG_REG_INGRESS_BMAC0_MEM);
3627
3628 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3629 BIGMAC_REGISTER_TX_STAT_GTBYT */
3630 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3631 dmae->opcode = opcode;
3632 dmae->src_addr_lo = (mac_addr +
3633 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3634 dmae->src_addr_hi = 0;
3635 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3636 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3637 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3638 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3639 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3640 dmae->comp_addr_hi = 0;
3641 dmae->comp_val = 1;
3642
3643 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3644 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3645 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3646 dmae->opcode = opcode;
3647 dmae->src_addr_lo = (mac_addr +
3648 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3649 dmae->src_addr_hi = 0;
3650 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003651 offsetof(struct bmac_stats, rx_stat_gr64_lo));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003652 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003653 offsetof(struct bmac_stats, rx_stat_gr64_lo));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003654 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3655 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3656 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657 dmae->comp_addr_hi = 0;
3658 dmae->comp_val = 1;
3659
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07003660 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003661
3662 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3663
3664 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3665 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3666 dmae->opcode = opcode;
3667 dmae->src_addr_lo = (mac_addr +
3668 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3669 dmae->src_addr_hi = 0;
3670 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3671 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3672 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3673 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3674 dmae->comp_addr_hi = 0;
3675 dmae->comp_val = 1;
3676
3677 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3678 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3679 dmae->opcode = opcode;
3680 dmae->src_addr_lo = (mac_addr +
3681 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3682 dmae->src_addr_hi = 0;
3683 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003684 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003685 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003686 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003687 dmae->len = 1;
3688 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3689 dmae->comp_addr_hi = 0;
3690 dmae->comp_val = 1;
3691
3692 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3693 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3694 dmae->opcode = opcode;
3695 dmae->src_addr_lo = (mac_addr +
3696 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3697 dmae->src_addr_hi = 0;
3698 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003699 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003700 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003701 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003702 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3703 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3704 dmae->comp_addr_hi = 0;
3705 dmae->comp_val = 1;
3706 }
3707
3708 /* NIG */
3709 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003710 dmae->opcode = opcode;
3711 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3712 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3713 dmae->src_addr_hi = 0;
3714 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3715 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3716 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3717 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3718 dmae->comp_addr_hi = 0;
3719 dmae->comp_val = 1;
3720
3721 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3722 dmae->opcode = opcode;
3723 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3724 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3725 dmae->src_addr_hi = 0;
3726 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3727 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3728 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3729 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3730 dmae->len = (2*sizeof(u32)) >> 2;
3731 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3732 dmae->comp_addr_hi = 0;
3733 dmae->comp_val = 1;
3734
3735 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003736 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3737 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3738 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3739#ifdef __BIG_ENDIAN
3740 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3741#else
3742 DMAE_CMD_ENDIANITY_DW_SWAP |
3743#endif
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003744 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3745 (vn << DMAE_CMD_E1HVN_SHIFT));
3746 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3747 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003748 dmae->src_addr_hi = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003749 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3750 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3751 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3752 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3753 dmae->len = (2*sizeof(u32)) >> 2;
3754 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3755 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3756 dmae->comp_val = DMAE_COMP_VAL;
3757
3758 *stats_comp = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003759}
3760
/* Program the single slow-path DMAE command that copies the host
 * per-function statistics buffer (func_stats) out to the device address
 * held in bp->func_stx.  The command is only prepared here; it is posted
 * later via bnx2x_hw_stats_post() (see bnx2x_stats_start()).
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity - a function statistics address must have been assigned */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* PCI -> GRC copy; completion is written back to PCI (stats_comp) */
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	/* GRC addresses are in dword units, hence the >> 2 */
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* completion flag is polled elsewhere; arm it now */
	*stats_comp = 0;
}
3796
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003797static void bnx2x_stats_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003798{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003799 if (bp->port.pmf)
3800 bnx2x_port_stats_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003801
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003802 else if (bp->func_stx)
3803 bnx2x_func_stats_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003804
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003805 bnx2x_hw_stats_post(bp);
3806 bnx2x_storm_stats_post(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003807}
3808
/* Entry point used when this function becomes the PMF: wait for any
 * outstanding DMAE completion, pull the current port stats snapshot,
 * then restart the regular collection cycle.  Call order is significant.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
3815
/* Restart statistics collection: drain the pending DMAE completion
 * first, then reprogram and repost the commands via bnx2x_stats_start().
 */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003821
/* Fold the freshly DMAE-read BigMAC counters (mac_stats.bmac_stats) into
 * the accumulated port statistics (pstats->mac_stx[1]) and derive the
 * pause-frame totals in bp->eth_stats.
 *
 * NOTE(review): the UPDATE_STAT64 macro presumably expands using the
 * local names 'new', 'pstats' and 'diff' - verify against its definition
 * before renaming any of these locals.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;	/* scratch for the 64-bit delta inside UPDATE_STAT64 */

	/* RX counters: hardware name (left) -> accumulated name (right) */
	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	/* the same hardware XOFF counter feeds two accumulated fields */
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	/* TX counters */
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	/* mirror the pause-frame totals into the ethtool stats */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
3872
/* Fold the freshly DMAE-read EMAC counters (mac_stats.emac_stats) into
 * the accumulated port statistics (pstats->mac_stx[1]) and derive the
 * pause-frame totals in bp->eth_stats.  Unlike the BigMAC, the EMAC has
 * separate XON/XOFF counters, so pause totals are sums of two fields.
 *
 * NOTE(review): UPDATE_EXTEND_STAT presumably expands using the local
 * names 'new' and 'pstats' - verify against its definition before
 * renaming these locals.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	/* extend each 32-bit hardware counter into its 64-bit accumulator */
	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause frames received = XON + XOFF pause frames */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause frames sent = XON + XOFF sent */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
3929
/* Process a completed hardware statistics DMAE: dispatch to the active
 * MAC's update routine, fold in the NIG counters, and publish the result
 * into bp->eth_stats.  Returns 0 on success, -1 if no MAC is active
 * (which the caller treats as "stats not updated").
 *
 * NOTE(review): the UPDATE_STAT64_NIG / ADD_EXTEND_64 macros presumably
 * expand using the local names 'new', 'old', 'pstats', 'estats' and
 * 'diff' - verify before renaming these locals.
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;	/* scratch for the 64-bit delta inside UPDATE_STAT64_NIG */
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	/* accumulate the NIG deltas (current snapshot minus previous one) */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	/* remember this snapshot for the next delta computation */
	memcpy(old, new, sizeof(struct nig_stats));

	/* publish the accumulated MAC block into the ethtool stats */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	/* mark the port stats block as consistent (start == end) */
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	/* report (once per change) a new NIG timer maximum from shmem */
	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
3979
/* Fold the storm (firmware) per-client statistics into the per-queue
 * (qstats), per-function (fstats) and global (estats) counters.
 *
 * Returns 0 on success, or a negative value if some storm has not yet
 * produced a snapshot matching bp->stats_counter for one of the queues;
 * in that case nothing for that queue is consumed and the caller may
 * retry on the next cycle.
 *
 * NOTE(review): the UPDATE_EXTEND_TSTAT / UPDATE_EXTEND_USTAT /
 * SUB_EXTEND_USTAT / UPDATE_EXTEND_XSTAT macros presumably expand using
 * the local names tclient/old_tclient, uclient/old_uclient,
 * xclient/old_xclient, qstats and diff - verify before renaming any of
 * these locals.
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	/* restart the function totals from the saved base; the two u32
	 * start/end markers at the front are deliberately excluded */
	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	/* these globals are re-accumulated from the per-queue stats below */
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;	/* scratch used inside the *_EXTEND_* macros */

		/* are storm stats valid?  A storm's counter must have
		 * advanced to match the driver's stats_counter, otherwise
		 * this queue's snapshot is stale.
		 */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		/* total RX bytes = broadcast + multicast + unicast bytes */
		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		/* valid bytes = total before error bytes are added below */
		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		/* extend tstorm RX packet/discard counters */
		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		/* ustorm no-buffer drops were counted as received above;
		 * subtract them from the RX totals and account them as
		 * no_buff_discard instead */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* total TX bytes = unicast + multicast + broadcast bytes */
		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		/* raw discard counters are only mirrored, not extended */
		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		/* accumulate this queue into the per-function totals */
		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		/* and into the globals that were zeroed above */
		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	/* bad octets seen by the MAC also count as received bytes */
	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* publish the function totals (markers excluded) into estats */
	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* per-port discard counters are only valid on the PMF */
	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	/* mark the function stats block as consistent (start == end) */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
4194
4195static void bnx2x_net_stats_update(struct bnx2x *bp)
4196{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004197 struct bnx2x_eth_stats *estats = &bp->eth_stats;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004198 struct net_device_stats *nstats = &bp->dev->stats;
Eilon Greensteinde832a52009-02-12 08:36:33 +00004199 int i;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004200
4201 nstats->rx_packets =
4202 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4203 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4204 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4205
4206 nstats->tx_packets =
4207 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4208 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4209 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4210
Eilon Greensteinde832a52009-02-12 08:36:33 +00004211 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004212
Eliezer Tamir0e39e642008-02-28 11:54:03 -08004213 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004214
Eilon Greensteinde832a52009-02-12 08:36:33 +00004215 nstats->rx_dropped = estats->mac_discard;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004216 for_each_queue(bp, i)
Eilon Greensteinde832a52009-02-12 08:36:33 +00004217 nstats->rx_dropped +=
4218 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4219
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004220 nstats->tx_dropped = 0;
4221
4222 nstats->multicast =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004223 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004224
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004225 nstats->collisions =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004226 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004227
4228 nstats->rx_length_errors =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004229 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4230 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4231 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4232 bnx2x_hilo(&estats->brb_truncate_hi);
4233 nstats->rx_crc_errors =
4234 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4235 nstats->rx_frame_errors =
4236 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4237 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004238 nstats->rx_missed_errors = estats->xxoverflow_discard;
4239
4240 nstats->rx_errors = nstats->rx_length_errors +
4241 nstats->rx_over_errors +
4242 nstats->rx_crc_errors +
4243 nstats->rx_frame_errors +
Eliezer Tamir0e39e642008-02-28 11:54:03 -08004244 nstats->rx_fifo_errors +
4245 nstats->rx_missed_errors;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004246
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004247 nstats->tx_aborted_errors =
Eilon Greensteinde832a52009-02-12 08:36:33 +00004248 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4249 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4250 nstats->tx_carrier_errors =
4251 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004252 nstats->tx_fifo_errors = 0;
4253 nstats->tx_heartbeat_errors = 0;
4254 nstats->tx_window_errors = 0;
4255
4256 nstats->tx_errors = nstats->tx_aborted_errors +
Eilon Greensteinde832a52009-02-12 08:36:33 +00004257 nstats->tx_carrier_errors +
4258 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4259}
4260
4261static void bnx2x_drv_stats_update(struct bnx2x *bp)
4262{
4263 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4264 int i;
4265
4266 estats->driver_xoff = 0;
4267 estats->rx_err_discard_pkt = 0;
4268 estats->rx_skb_alloc_failed = 0;
4269 estats->hw_csum_err = 0;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004270 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +00004271 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4272
4273 estats->driver_xoff += qstats->driver_xoff;
4274 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4275 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4276 estats->hw_csum_err += qstats->hw_csum_err;
4277 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004278}
4279
/* Periodic statistics refresh.  Bails out if the previous DMAE has not
 * completed yet.  A PMF first folds in the hardware (MAC/NIG) counters;
 * then the storm stats are consumed - if they stay stale for 4 cycles
 * in a row the driver panics.  Finally the netdev and driver counters
 * are rebuilt and the next collection cycle is posted.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* previous DMAE still in flight - try again next tick */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* optional debug dump of queue-0 ring state and discard counters */
	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = bp->fp;
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	/* kick off the next hardware DMAE and storm query */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004347
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004348static void bnx2x_port_stats_stop(struct bnx2x *bp)
4349{
4350 struct dmae_command *dmae;
4351 u32 opcode;
4352 int loader_idx = PMF_DMAE_C(bp);
4353 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004354
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004355 bp->executer_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004356
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004357 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4358 DMAE_CMD_C_ENABLE |
4359 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004360#ifdef __BIG_ENDIAN
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004361 DMAE_CMD_ENDIANITY_B_DW_SWAP |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004362#else
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004363 DMAE_CMD_ENDIANITY_DW_SWAP |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004364#endif
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004365 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4366 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4367
4368 if (bp->port.port_stx) {
4369
4370 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4371 if (bp->func_stx)
4372 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4373 else
4374 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4375 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4376 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4377 dmae->dst_addr_lo = bp->port.port_stx >> 2;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004378 dmae->dst_addr_hi = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004379 dmae->len = sizeof(struct host_port_stats) >> 2;
4380 if (bp->func_stx) {
4381 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4382 dmae->comp_addr_hi = 0;
4383 dmae->comp_val = 1;
4384 } else {
4385 dmae->comp_addr_lo =
4386 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4387 dmae->comp_addr_hi =
4388 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4389 dmae->comp_val = DMAE_COMP_VAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004390
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004391 *stats_comp = 0;
4392 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004393 }
4394
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004395 if (bp->func_stx) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004396
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004397 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4398 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4399 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4400 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4401 dmae->dst_addr_lo = bp->func_stx >> 2;
4402 dmae->dst_addr_hi = 0;
4403 dmae->len = sizeof(struct host_func_stats) >> 2;
4404 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4405 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4406 dmae->comp_val = DMAE_COMP_VAL;
4407
4408 *stats_comp = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004409 }
4410}
4411
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004412static void bnx2x_stats_stop(struct bnx2x *bp)
4413{
4414 int update = 0;
4415
4416 bnx2x_stats_comp(bp);
4417
4418 if (bp->port.pmf)
4419 update = (bnx2x_hw_stats_update(bp) == 0);
4420
4421 update |= (bnx2x_storm_stats_update(bp) == 0);
4422
4423 if (update) {
4424 bnx2x_net_stats_update(bp);
4425
4426 if (bp->port.pmf)
4427 bnx2x_port_stats_stop(bp);
4428
4429 bnx2x_hw_stats_post(bp);
4430 bnx2x_stats_comp(bp);
4431 }
4432}
4433
/* No-op action for statistics state-machine transitions that need none. */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
4437
4438static const struct {
4439 void (*action)(struct bnx2x *bp);
4440 enum bnx2x_stats_state next_state;
4441} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4442/* state event */
4443{
4444/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4445/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4446/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4447/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4448},
4449{
4450/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4451/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4452/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4453/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4454}
4455};
4456
4457static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4458{
4459 enum bnx2x_stats_state state = bp->stats_state;
4460
4461 bnx2x_stats_stm[state][event].action(bp);
4462 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4463
Eilon Greenstein89246652009-08-12 08:23:56 +00004464 /* Make sure the state has been "changed" */
4465 smp_wmb();
4466
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004467 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4468 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4469 state, event, bp->stats_state);
4470}
4471
Eilon Greenstein6fe49bb2009-08-12 08:23:17 +00004472static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4473{
4474 struct dmae_command *dmae;
4475 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4476
4477 /* sanity */
4478 if (!bp->port.pmf || !bp->port.port_stx) {
4479 BNX2X_ERR("BUG!\n");
4480 return;
4481 }
4482
4483 bp->executer_idx = 0;
4484
4485 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4486 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4487 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4488 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4489#ifdef __BIG_ENDIAN
4490 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4491#else
4492 DMAE_CMD_ENDIANITY_DW_SWAP |
4493#endif
4494 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4495 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4496 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4497 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4498 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4499 dmae->dst_addr_hi = 0;
4500 dmae->len = sizeof(struct host_port_stats) >> 2;
4501 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4502 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4503 dmae->comp_val = DMAE_COMP_VAL;
4504
4505 *stats_comp = 0;
4506 bnx2x_hw_stats_post(bp);
4507 bnx2x_stats_comp(bp);
4508}
4509
4510static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4511{
4512 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4513 int port = BP_PORT(bp);
4514 int func;
4515 u32 func_stx;
4516
4517 /* sanity */
4518 if (!bp->port.pmf || !bp->func_stx) {
4519 BNX2X_ERR("BUG!\n");
4520 return;
4521 }
4522
4523 /* save our func_stx */
4524 func_stx = bp->func_stx;
4525
4526 for (vn = VN_0; vn < vn_max; vn++) {
4527 func = 2*vn + port;
4528
4529 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4530 bnx2x_func_stats_init(bp);
4531 bnx2x_hw_stats_post(bp);
4532 bnx2x_stats_comp(bp);
4533 }
4534
4535 /* restore our func_stx */
4536 bp->func_stx = func_stx;
4537}
4538
4539static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4540{
4541 struct dmae_command *dmae = &bp->stats_dmae;
4542 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4543
4544 /* sanity */
4545 if (!bp->func_stx) {
4546 BNX2X_ERR("BUG!\n");
4547 return;
4548 }
4549
4550 bp->executer_idx = 0;
4551 memset(dmae, 0, sizeof(struct dmae_command));
4552
4553 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4554 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4555 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4556#ifdef __BIG_ENDIAN
4557 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4558#else
4559 DMAE_CMD_ENDIANITY_DW_SWAP |
4560#endif
4561 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4562 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4563 dmae->src_addr_lo = bp->func_stx >> 2;
4564 dmae->src_addr_hi = 0;
4565 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4566 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4567 dmae->len = sizeof(struct host_func_stats) >> 2;
4568 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4569 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4570 dmae->comp_val = DMAE_COMP_VAL;
4571
4572 *stats_comp = 0;
4573 bnx2x_hw_stats_post(bp);
4574 bnx2x_stats_comp(bp);
4575}
4576
4577static void bnx2x_stats_init(struct bnx2x *bp)
4578{
4579 int port = BP_PORT(bp);
4580 int func = BP_FUNC(bp);
4581 int i;
4582
4583 bp->stats_pending = 0;
4584 bp->executer_idx = 0;
4585 bp->stats_counter = 0;
4586
4587 /* port and func stats for management */
4588 if (!BP_NOMCP(bp)) {
4589 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4590 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4591
4592 } else {
4593 bp->port.port_stx = 0;
4594 bp->func_stx = 0;
4595 }
4596 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4597 bp->port.port_stx, bp->func_stx);
4598
4599 /* port stats */
4600 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4601 bp->port.old_nig_stats.brb_discard =
4602 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4603 bp->port.old_nig_stats.brb_truncate =
4604 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4605 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4606 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4607 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4608 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4609
4610 /* function stats */
4611 for_each_queue(bp, i) {
4612 struct bnx2x_fastpath *fp = &bp->fp[i];
4613
4614 memset(&fp->old_tclient, 0,
4615 sizeof(struct tstorm_per_client_stats));
4616 memset(&fp->old_uclient, 0,
4617 sizeof(struct ustorm_per_client_stats));
4618 memset(&fp->old_xclient, 0,
4619 sizeof(struct xstorm_per_client_stats));
4620 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4621 }
4622
4623 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4624 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4625
4626 bp->stats_state = STATS_STATE_DISABLED;
4627
4628 if (bp->port.pmf) {
4629 if (bp->port.port_stx)
4630 bnx2x_port_stats_base_init(bp);
4631
4632 if (bp->func_stx)
4633 bnx2x_func_stats_base_init(bp);
4634
4635 } else if (bp->func_stx)
4636 bnx2x_func_stats_base_update(bp);
4637}
4638
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004639static void bnx2x_timer(unsigned long data)
4640{
4641 struct bnx2x *bp = (struct bnx2x *) data;
4642
4643 if (!netif_running(bp->dev))
4644 return;
4645
4646 if (atomic_read(&bp->intr_sem) != 0)
Eliezer Tamirf1410642008-02-28 11:51:50 -08004647 goto timer_restart;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004648
4649 if (poll) {
4650 struct bnx2x_fastpath *fp = &bp->fp[0];
4651 int rc;
4652
Eilon Greenstein7961f792009-03-02 07:59:31 +00004653 bnx2x_tx_int(fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004654 rc = bnx2x_rx_int(fp, 1000);
4655 }
4656
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004657 if (!BP_NOMCP(bp)) {
4658 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004659 u32 drv_pulse;
4660 u32 mcp_pulse;
4661
4662 ++bp->fw_drv_pulse_wr_seq;
4663 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4664 /* TBD - add SYSTEM_TIME */
4665 drv_pulse = bp->fw_drv_pulse_wr_seq;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004666 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004667
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004668 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004669 MCP_PULSE_SEQ_MASK);
4670 /* The delta between driver pulse and mcp response
4671 * should be 1 (before mcp response) or 0 (after mcp response)
4672 */
4673 if ((drv_pulse != mcp_pulse) &&
4674 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4675 /* someone lost a heartbeat... */
4676 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4677 drv_pulse, mcp_pulse);
4678 }
4679 }
4680
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07004681 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004682 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004683
Eliezer Tamirf1410642008-02-28 11:51:50 -08004684timer_restart:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004685 mod_timer(&bp->timer, jiffies + bp->current_interval);
4686}
4687
4688/* end of Statistics */
4689
4690/* nic init */
4691
4692/*
4693 * nic init service functions
4694 */
4695
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004696static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004697{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004698 int port = BP_PORT(bp);
4699
Eilon Greensteinca003922009-08-12 22:53:28 -07004700 /* "CSTORM" */
4701 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4702 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4703 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4704 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4705 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4706 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004707}
4708
Eilon Greenstein5c862842008-08-13 15:51:48 -07004709static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4710 dma_addr_t mapping, int sb_id)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004711{
4712 int port = BP_PORT(bp);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004713 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004714 int index;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004715 u64 section;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004716
4717 /* USTORM */
4718 section = ((u64)mapping) + offsetof(struct host_status_block,
4719 u_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004720 sb->u_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004721
Eilon Greensteinca003922009-08-12 22:53:28 -07004722 REG_WR(bp, BAR_CSTRORM_INTMEM +
4723 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4724 REG_WR(bp, BAR_CSTRORM_INTMEM +
4725 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004726 U64_HI(section));
Eilon Greensteinca003922009-08-12 22:53:28 -07004727 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4728 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004729
4730 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
Eilon Greensteinca003922009-08-12 22:53:28 -07004731 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4732 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004733
4734 /* CSTORM */
4735 section = ((u64)mapping) + offsetof(struct host_status_block,
4736 c_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004737 sb->c_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004738
4739 REG_WR(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07004740 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004741 REG_WR(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07004742 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004743 U64_HI(section));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004744 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
Eilon Greensteinca003922009-08-12 22:53:28 -07004745 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004746
4747 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4748 REG_WR16(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07004749 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004750
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004751 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4752}
4753
4754static void bnx2x_zero_def_sb(struct bnx2x *bp)
4755{
4756 int func = BP_FUNC(bp);
4757
Eilon Greensteinca003922009-08-12 22:53:28 -07004758 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004759 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4760 sizeof(struct tstorm_def_status_block)/4);
Eilon Greensteinca003922009-08-12 22:53:28 -07004761 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4762 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4763 sizeof(struct cstorm_def_status_block_u)/4);
4764 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4765 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4766 sizeof(struct cstorm_def_status_block_c)/4);
4767 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
Eilon Greenstein490c3c92009-03-02 07:59:52 +00004768 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4769 sizeof(struct xstorm_def_status_block)/4);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004770}
4771
4772static void bnx2x_init_def_sb(struct bnx2x *bp,
4773 struct host_def_status_block *def_sb,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004774 dma_addr_t mapping, int sb_id)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004775{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004776 int port = BP_PORT(bp);
4777 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004778 int index, val, reg_offset;
4779 u64 section;
4780
4781 /* ATTN */
4782 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4783 atten_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004784 def_sb->atten_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004785
Eliezer Tamir49d66772008-02-28 11:53:13 -08004786 bp->attn_state = 0;
4787
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004788 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4789 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4790
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004791 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004792 bp->attn_group[index].sig[0] = REG_RD(bp,
4793 reg_offset + 0x10*index);
4794 bp->attn_group[index].sig[1] = REG_RD(bp,
4795 reg_offset + 0x4 + 0x10*index);
4796 bp->attn_group[index].sig[2] = REG_RD(bp,
4797 reg_offset + 0x8 + 0x10*index);
4798 bp->attn_group[index].sig[3] = REG_RD(bp,
4799 reg_offset + 0xc + 0x10*index);
4800 }
4801
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004802 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4803 HC_REG_ATTN_MSG0_ADDR_L);
4804
4805 REG_WR(bp, reg_offset, U64_LO(section));
4806 REG_WR(bp, reg_offset + 4, U64_HI(section));
4807
4808 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4809
4810 val = REG_RD(bp, reg_offset);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004811 val |= sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004812 REG_WR(bp, reg_offset, val);
4813
4814 /* USTORM */
4815 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4816 u_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004817 def_sb->u_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004818
Eilon Greensteinca003922009-08-12 22:53:28 -07004819 REG_WR(bp, BAR_CSTRORM_INTMEM +
4820 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4821 REG_WR(bp, BAR_CSTRORM_INTMEM +
4822 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004823 U64_HI(section));
Eilon Greensteinca003922009-08-12 22:53:28 -07004824 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4825 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004826
4827 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
Eilon Greensteinca003922009-08-12 22:53:28 -07004828 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4829 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004830
4831 /* CSTORM */
4832 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4833 c_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004834 def_sb->c_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004835
4836 REG_WR(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07004837 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004838 REG_WR(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07004839 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004840 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07004841 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
Eilon Greensteinca003922009-08-12 22:53:28 -07004842 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004843
4844 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4845 REG_WR16(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07004846 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004847
4848 /* TSTORM */
4849 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4850 t_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004851 def_sb->t_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004852
4853 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004854 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004855 REG_WR(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004856 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004857 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07004858 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004859 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004860
4861 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4862 REG_WR16(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004863 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004864
4865 /* XSTORM */
4866 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4867 x_def_status_block);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004868 def_sb->x_def_status_block.status_block_id = sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004869
4870 REG_WR(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004871 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004872 REG_WR(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004873 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004874 U64_HI(section));
Eilon Greenstein5c862842008-08-13 15:51:48 -07004875 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004876 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004877
4878 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4879 REG_WR16(bp, BAR_XSTRORM_INTMEM +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004880 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004881
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004882 bp->stats_pending = 0;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004883 bp->set_mac_pending = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004884
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004885 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004886}
4887
4888static void bnx2x_update_coalesce(struct bnx2x *bp)
4889{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004890 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004891 int i;
4892
4893 for_each_queue(bp, i) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004894 int sb_id = bp->fp[i].sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004895
4896 /* HC_INDEX_U_ETH_RX_CQ_CONS */
Eilon Greensteinca003922009-08-12 22:53:28 -07004897 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4898 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4899 U_SB_ETH_RX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00004900 bp->rx_ticks/(4 * BNX2X_BTR));
Eilon Greensteinca003922009-08-12 22:53:28 -07004901 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4902 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4903 U_SB_ETH_RX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00004904 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004905
4906 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4907 REG_WR8(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07004908 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4909 C_SB_ETH_TX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00004910 bp->tx_ticks/(4 * BNX2X_BTR));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004911 REG_WR16(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07004912 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4913 C_SB_ETH_TX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00004914 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004915 }
4916}
4917
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004918static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4919 struct bnx2x_fastpath *fp, int last)
4920{
4921 int i;
4922
4923 for (i = 0; i < last; i++) {
4924 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4925 struct sk_buff *skb = rx_buf->skb;
4926
4927 if (skb == NULL) {
4928 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4929 continue;
4930 }
4931
4932 if (fp->tpa_state[i] == BNX2X_TPA_START)
4933 pci_unmap_single(bp->pdev,
4934 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein356e2382009-02-12 08:38:32 +00004935 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004936
4937 dev_kfree_skb(skb);
4938 rx_buf->skb = NULL;
4939 }
4940}
4941
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004942static void bnx2x_init_rx_rings(struct bnx2x *bp)
4943{
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004944 int func = BP_FUNC(bp);
Eilon Greenstein32626232008-08-13 15:51:07 -07004945 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4946 ETH_MAX_AGGREGATION_QUEUES_E1H;
4947 u16 ring_prod, cqe_ring_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004948 int i, j;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004949
Eilon Greenstein87942b42009-02-12 08:36:49 +00004950 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
Eilon Greenstein0f008462009-02-12 08:36:18 +00004951 DP(NETIF_MSG_IFUP,
4952 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004953
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004954 if (bp->flags & TPA_ENABLE_FLAG) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004955
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004956 for_each_queue(bp, j) {
Eilon Greenstein32626232008-08-13 15:51:07 -07004957 struct bnx2x_fastpath *fp = &bp->fp[j];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004958
Eilon Greenstein32626232008-08-13 15:51:07 -07004959 for (i = 0; i < max_agg_queues; i++) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004960 fp->tpa_pool[i].skb =
4961 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4962 if (!fp->tpa_pool[i].skb) {
4963 BNX2X_ERR("Failed to allocate TPA "
4964 "skb pool for queue[%d] - "
4965 "disabling TPA on this "
4966 "queue!\n", j);
4967 bnx2x_free_tpa_pool(bp, fp, i);
4968 fp->disable_tpa = 1;
4969 break;
4970 }
4971 pci_unmap_addr_set((struct sw_rx_bd *)
4972 &bp->fp->tpa_pool[i],
4973 mapping, 0);
4974 fp->tpa_state[i] = BNX2X_TPA_STOP;
4975 }
4976 }
4977 }
4978
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004979 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004980 struct bnx2x_fastpath *fp = &bp->fp[j];
4981
4982 fp->rx_bd_cons = 0;
4983 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004984 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004985
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004986 /* "next page" elements initialization */
4987 /* SGE ring */
4988 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4989 struct eth_rx_sge *sge;
4990
4991 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4992 sge->addr_hi =
4993 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4994 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4995 sge->addr_lo =
4996 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4997 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4998 }
4999
5000 bnx2x_init_sge_ring_bit_mask(fp);
5001
5002 /* RX BD ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005003 for (i = 1; i <= NUM_RX_RINGS; i++) {
5004 struct eth_rx_bd *rx_bd;
5005
5006 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5007 rx_bd->addr_hi =
5008 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005009 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005010 rx_bd->addr_lo =
5011 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005012 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005013 }
5014
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005015 /* CQ ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005016 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5017 struct eth_rx_cqe_next_page *nextpg;
5018
5019 nextpg = (struct eth_rx_cqe_next_page *)
5020 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5021 nextpg->addr_hi =
5022 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005023 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005024 nextpg->addr_lo =
5025 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005026 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005027 }
5028
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005029 /* Allocate SGEs and initialize the ring elements */
5030 for (i = 0, ring_prod = 0;
5031 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005032
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005033 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5034 BNX2X_ERR("was only able to allocate "
5035 "%d rx sges\n", i);
5036 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5037 /* Cleanup already allocated elements */
5038 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
Eilon Greenstein32626232008-08-13 15:51:07 -07005039 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005040 fp->disable_tpa = 1;
5041 ring_prod = 0;
5042 break;
5043 }
5044 ring_prod = NEXT_SGE_IDX(ring_prod);
5045 }
5046 fp->rx_sge_prod = ring_prod;
5047
5048 /* Allocate BDs and initialize BD ring */
Yitchak Gertner66e855f2008-08-13 15:49:05 -07005049 fp->rx_comp_cons = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005050 cqe_ring_prod = ring_prod = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005051 for (i = 0; i < bp->rx_ring_size; i++) {
5052 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5053 BNX2X_ERR("was only able to allocate "
Eilon Greensteinde832a52009-02-12 08:36:33 +00005054 "%d rx skbs on queue[%d]\n", i, j);
5055 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005056 break;
5057 }
5058 ring_prod = NEXT_RX_IDX(ring_prod);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005059 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
Ilpo Järvinen53e5e962008-07-25 21:40:45 -07005060 WARN_ON(ring_prod <= i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005061 }
5062
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005063 fp->rx_bd_prod = ring_prod;
5064 /* must not have more available CQEs than BDs */
5065 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5066 cqe_ring_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005067 fp->rx_pkt = fp->rx_calls = 0;
5068
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005069 /* Warning!
5070 * this will generate an interrupt (to the TSTORM)
5071 * must only be done after chip is initialized
5072 */
5073 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5074 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005075 if (j != 0)
5076 continue;
5077
5078 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005079 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005080 U64_LO(fp->rx_comp_mapping));
5081 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005082 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005083 U64_HI(fp->rx_comp_mapping));
5084 }
5085}
5086
5087static void bnx2x_init_tx_ring(struct bnx2x *bp)
5088{
5089 int i, j;
5090
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005091 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005092 struct bnx2x_fastpath *fp = &bp->fp[j];
5093
5094 for (i = 1; i <= NUM_TX_RINGS; i++) {
Eilon Greensteinca003922009-08-12 22:53:28 -07005095 struct eth_tx_next_bd *tx_next_bd =
5096 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005097
Eilon Greensteinca003922009-08-12 22:53:28 -07005098 tx_next_bd->addr_hi =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005099 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005100 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eilon Greensteinca003922009-08-12 22:53:28 -07005101 tx_next_bd->addr_lo =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005102 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005103 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005104 }
5105
Eilon Greensteinca003922009-08-12 22:53:28 -07005106 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5107 fp->tx_db.data.zero_fill1 = 0;
5108 fp->tx_db.data.prod = 0;
5109
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005110 fp->tx_pkt_prod = 0;
5111 fp->tx_pkt_cons = 0;
5112 fp->tx_bd_prod = 0;
5113 fp->tx_bd_cons = 0;
5114 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5115 fp->tx_pkt = 0;
5116 }
5117}
5118
5119static void bnx2x_init_sp_ring(struct bnx2x *bp)
5120{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005121 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005122
5123 spin_lock_init(&bp->spq_lock);
5124
5125 bp->spq_left = MAX_SPQ_PENDING;
5126 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005127 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5128 bp->spq_prod_bd = bp->spq;
5129 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5130
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005131 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005132 U64_LO(bp->spq_mapping));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005133 REG_WR(bp,
5134 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005135 U64_HI(bp->spq_mapping));
5136
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005137 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005138 bp->spq_prod_idx);
5139}
5140
/* Fill in the per-queue Ethernet connection contexts kept in slow-path
 * memory: the USTORM section describes the Rx BD/SGE rings of each queue,
 * the CSTORM/XSTORM sections describe its Tx ring.
 */
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		/* Client identity and status-block association */
		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		/* Rx BD ring: buffer size and DMA base of the descriptor pages */
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			/* TPA enabled for this queue - describe the SGE ring too */
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			/* sge_buff_size is 16 bits wide - clamp to 0xffff */
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			/* SGE pages needed for an MTU-sized frame, rounded up
			 * to a whole multiple of PAGES_PER_SGE */
			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		/* CDU validation values for the U and X aggregation contexts */
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		/* Tx completions are reported via the CSTORM CQ index */
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		/* DMA base of the Tx BD pages */
		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}
5216
5217static void bnx2x_init_ind_table(struct bnx2x *bp)
5218{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005219 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005220 int i;
5221
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005222 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005223 return;
5224
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005225 DP(NETIF_MSG_IFUP,
5226 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005227 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005228 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005229 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005230 bp->fp->cl_id + (i % bp->num_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005231}
5232
/* Write the per-client Tstorm configuration (MTU, statistics counter and
 * VLAN-stripping flags) into internal memory for every queue's client.
 */
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	/* Enable HW VLAN tag removal only when Rx is active, a vlan group
	 * is registered and HW VLAN Rx acceleration is turned on */
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		/* each client gets its own statistics counter */
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		/* the config struct is written out as two consecutive
		 * 32-bit words */
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
5265
/* Translate the driver's Rx mode (none/normal/allmulti/promisc) into the
 * Tstorm MAC filter configuration and the NIG LLH drive mask, and push
 * both to the chip. 'mask' selects which client(s) the filters apply to.
 */
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	/* accept_all/drop_all fields left at 0 mean "use normal MAC/MC
	 * filtering" for that traffic class */
	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	/* port 0 and port 1 have separate LLH drive-mask registers */
	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	/* write the filter config to internal memory, one word at a time */
	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	/* client config is only meaningful when Rx is enabled */
	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
5328
Eilon Greenstein471de712008-08-13 15:49:35 -07005329static void bnx2x_init_internal_common(struct bnx2x *bp)
5330{
5331 int i;
5332
5333 /* Zero this manually as its initialization is
5334 currently missing in the initTool */
5335 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5336 REG_WR(bp, BAR_USTRORM_INTMEM +
5337 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5338}
5339
5340static void bnx2x_init_internal_port(struct bnx2x *bp)
5341{
5342 int port = BP_PORT(bp);
5343
Eilon Greensteinca003922009-08-12 22:53:28 -07005344 REG_WR(bp,
5345 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5346 REG_WR(bp,
5347 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
Eilon Greenstein471de712008-08-13 15:49:35 -07005348 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5349 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5350}
5351
/* Per-function internal-memory initialization: RSS/TPA configuration,
 * Rx mode, per-client statistics reset, statistics collection addresses,
 * E1H multi-function settings, CQE page mapping, dropless flow control
 * thresholds and rate-shaping/fairness (cmng) setup.
 */
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	/* RSS configuration - only meaningful with multiple queues */
	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	/* Zero the per-client statistics areas in all four storms */
	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	/* DMA address of the host buffer that receives FW statistics,
	 * programmed into the X/T/U storms (low word then high word) */
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	/* E1H: publish single/multi-function mode and the outer VLAN tag */
	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		/* threshold values (in ring entries, presumably) at which
		 * the FW asserts/deasserts pause - NOTE(review): exact units
		 * come from the FW interface, confirm against HSI docs */
		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			/* SGE thresholds only matter when TPA is active */
			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}


			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}


	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
5566
Eilon Greenstein471de712008-08-13 15:49:35 -07005567static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5568{
5569 switch (load_code) {
5570 case FW_MSG_CODE_DRV_LOAD_COMMON:
5571 bnx2x_init_internal_common(bp);
5572 /* no break */
5573
5574 case FW_MSG_CODE_DRV_LOAD_PORT:
5575 bnx2x_init_internal_port(bp);
5576 /* no break */
5577
5578 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5579 bnx2x_init_internal_func(bp);
5580 break;
5581
5582 default:
5583 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5584 break;
5585 }
5586}
5587
/* Top-level NIC initialization: set up fastpath state and status blocks,
 * initialize all rings and contexts, program internal memory, and finally
 * enable interrupts. The ordering here is significant - interrupts are
 * enabled only after everything else is in place.
 */
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		/* sb 0 (== cl_id base) is reserved for CNIC */
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
5642
5643/* end of nic init */
5644
5645/*
5646 * gzip service functions
5647 */
5648
5649static int bnx2x_gunzip_init(struct bnx2x *bp)
5650{
5651 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5652 &bp->gunzip_mapping);
5653 if (bp->gunzip_buf == NULL)
5654 goto gunzip_nomem1;
5655
5656 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5657 if (bp->strm == NULL)
5658 goto gunzip_nomem2;
5659
5660 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5661 GFP_KERNEL);
5662 if (bp->strm->workspace == NULL)
5663 goto gunzip_nomem3;
5664
5665 return 0;
5666
5667gunzip_nomem3:
5668 kfree(bp->strm);
5669 bp->strm = NULL;
5670
5671gunzip_nomem2:
5672 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5673 bp->gunzip_mapping);
5674 bp->gunzip_buf = NULL;
5675
5676gunzip_nomem1:
5677 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005678 " un-compression\n", bp->dev->name);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005679 return -ENOMEM;
5680}
5681
5682static void bnx2x_gunzip_end(struct bnx2x *bp)
5683{
5684 kfree(bp->strm->workspace);
5685
5686 kfree(bp->strm);
5687 bp->strm = NULL;
5688
5689 if (bp->gunzip_buf) {
5690 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5691 bp->gunzip_mapping);
5692 bp->gunzip_buf = NULL;
5693 }
5694}
5695
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005696static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005697{
5698 int n, rc;
5699
5700 /* check gzip header */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005701 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5702 BNX2X_ERR("Bad gzip header\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005703 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005704 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005705
5706 n = 10;
5707
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005708#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005709
5710 if (zbuf[3] & FNAME)
5711 while ((zbuf[n++] != 0) && (n < len));
5712
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005713 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005714 bp->strm->avail_in = len - n;
5715 bp->strm->next_out = bp->gunzip_buf;
5716 bp->strm->avail_out = FW_BUF_SIZE;
5717
5718 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5719 if (rc != Z_OK)
5720 return rc;
5721
5722 rc = zlib_inflate(bp->strm, Z_FINISH);
5723 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5724 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5725 bp->dev->name, bp->strm->msg);
5726
5727 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5728 if (bp->gunzip_outlen & 0x3)
5729 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5730 " gunzip_outlen (%d) not aligned\n",
5731 bp->dev->name, bp->gunzip_outlen);
5732 bp->gunzip_outlen >>= 2;
5733
5734 zlib_inflateEnd(bp->strm);
5735
5736 if (rc == Z_STREAM_END)
5737 return 0;
5738
5739 return rc;
5740}
5741
5742/* nic load/unload */
5743
5744/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005745 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005746 */
5747
5748/* send a NIG loopback debug packet */
5749static void bnx2x_lb_pckt(struct bnx2x *bp)
5750{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005751 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005752
5753 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005754 wb_write[0] = 0x55555555;
5755 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005756 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005757 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005758
5759 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005760 wb_write[0] = 0x09000000;
5761 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005762 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005763 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005764}
5765
5766/* some of the internal memories
5767 * are not directly readable from the driver
5768 * to test them we send debug packets
5769 */
5770static int bnx2x_int_mem_test(struct bnx2x *bp)
5771{
5772 int factor;
5773 int count, i;
5774 u32 val = 0;
5775
Eilon Greensteinad8d3942008-06-23 20:29:02 -07005776 if (CHIP_REV_IS_FPGA(bp))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005777 factor = 120;
Eilon Greensteinad8d3942008-06-23 20:29:02 -07005778 else if (CHIP_REV_IS_EMUL(bp))
5779 factor = 200;
5780 else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005781 factor = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005782
5783 DP(NETIF_MSG_HW, "start part1\n");
5784
5785 /* Disable inputs of parser neighbor blocks */
5786 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5787 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5788 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
Eilon Greenstein3196a882008-08-13 15:58:49 -07005789 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005790
5791 /* Write 0 to parser credits for CFC search request */
5792 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5793
5794 /* send Ethernet packet */
5795 bnx2x_lb_pckt(bp);
5796
5797 /* TODO do i reset NIG statistic? */
5798 /* Wait until NIG register shows 1 packet of size 0x10 */
5799 count = 1000 * factor;
5800 while (count) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005801
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005802 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5803 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005804 if (val == 0x10)
5805 break;
5806
5807 msleep(10);
5808 count--;
5809 }
5810 if (val != 0x10) {
5811 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5812 return -1;
5813 }
5814
5815 /* Wait until PRS register shows 1 packet */
5816 count = 1000 * factor;
5817 while (count) {
5818 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005819 if (val == 1)
5820 break;
5821
5822 msleep(10);
5823 count--;
5824 }
5825 if (val != 0x1) {
5826 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5827 return -2;
5828 }
5829
5830 /* Reset and init BRB, PRS */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005831 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005832 msleep(50);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005833 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005834 msleep(50);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005835 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5836 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005837
5838 DP(NETIF_MSG_HW, "part2\n");
5839
5840 /* Disable inputs of parser neighbor blocks */
5841 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5842 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5843 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
Eilon Greenstein3196a882008-08-13 15:58:49 -07005844 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005845
5846 /* Write 0 to parser credits for CFC search request */
5847 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5848
5849 /* send 10 Ethernet packets */
5850 for (i = 0; i < 10; i++)
5851 bnx2x_lb_pckt(bp);
5852
5853 /* Wait until NIG register shows 10 + 1
5854 packets of size 11*0x10 = 0xb0 */
5855 count = 1000 * factor;
5856 while (count) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005857
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005858 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5859 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005860 if (val == 0xb0)
5861 break;
5862
5863 msleep(10);
5864 count--;
5865 }
5866 if (val != 0xb0) {
5867 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5868 return -3;
5869 }
5870
5871 /* Wait until PRS register shows 2 packets */
5872 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5873 if (val != 2)
5874 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5875
5876 /* Write 1 to parser credits for CFC search request */
5877 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5878
5879 /* Wait until PRS register shows 3 packets */
5880 msleep(10 * factor);
5881 /* Wait until NIG register shows 1 packet of size 0x10 */
5882 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5883 if (val != 3)
5884 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5885
5886 /* clear NIG EOP FIFO */
5887 for (i = 0; i < 11; i++)
5888 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5889 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5890 if (val != 1) {
5891 BNX2X_ERR("clear of NIG failed\n");
5892 return -4;
5893 }
5894
5895 /* Reset and init BRB, PRS, NIG */
5896 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5897 msleep(50);
5898 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5899 msleep(50);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005900 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5901 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005902#ifndef BCM_CNIC
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005903 /* set NIC mode */
5904 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5905#endif
5906
5907 /* Enable inputs of parser neighbor blocks */
5908 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5909 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5910 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
Eilon Greenstein3196a882008-08-13 15:58:49 -07005911 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005912
5913 DP(NETIF_MSG_HW, "done\n");
5914
5915 return 0; /* OK */
5916}
5917
/* Unmask the attention interrupts of the HW blocks by zeroing their
 * interrupt-mask registers (0 = unmasked for these registers).
 * The commented-out writes are blocks deliberately left masked;
 * PBF keeps bits 3 and 4 masked and PXP2's mask depends on whether
 * this is an FPGA revision of the chip. */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
5956
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005957
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00005958static void bnx2x_reset_common(struct bnx2x *bp)
5959{
5960 /* reset_common */
5961 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5962 0xd3ffff7f);
5963 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5964}
5965
Eilon Greenstein573f2032009-08-12 08:24:14 +00005966static void bnx2x_init_pxp(struct bnx2x *bp)
5967{
5968 u16 devctl;
5969 int r_order, w_order;
5970
5971 pci_read_config_word(bp->pdev,
5972 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5973 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5974 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5975 if (bp->mrrs == -1)
5976 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5977 else {
5978 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5979 r_order = bp->mrrs;
5980 }
5981
5982 bnx2x_init_pxp_arb(bp, r_order, w_order);
5983}
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00005984
5985static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5986{
5987 u32 val;
5988 u8 port;
5989 u8 is_required = 0;
5990
5991 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5992 SHARED_HW_CFG_FAN_FAILURE_MASK;
5993
5994 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5995 is_required = 1;
5996
5997 /*
5998 * The fan failure mechanism is usually related to the PHY type since
5999 * the power consumption of the board is affected by the PHY. Currently,
6000 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6001 */
6002 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6003 for (port = PORT_0; port < PORT_MAX; port++) {
6004 u32 phy_type =
6005 SHMEM_RD(bp, dev_info.port_hw_config[port].
6006 external_phy_config) &
6007 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6008 is_required |=
6009 ((phy_type ==
6010 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6011 (phy_type ==
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006012 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6013 (phy_type ==
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00006014 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6015 }
6016
6017 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6018
6019 if (is_required == 0)
6020 return;
6021
6022 /* Fan failure is indicated by SPIO 5 */
6023 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6024 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6025
6026 /* set to active low mode */
6027 val = REG_RD(bp, MISC_REG_SPIO_INT);
6028 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6029 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6030 REG_WR(bp, MISC_REG_SPIO_INT, val);
6031
6032 /* enable interrupt to signal the IGU */
6033 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6034 val |= (1 << MISC_REGISTERS_SPIO_5);
6035 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6036}
6037
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006038static int bnx2x_init_common(struct bnx2x *bp)
6039{
6040 u32 val, i;
Michael Chan37b091b2009-10-10 13:46:55 +00006041#ifdef BCM_CNIC
6042 u32 wb_write[2];
6043#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006044
6045 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6046
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00006047 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006048 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6049 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6050
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006051 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006052 if (CHIP_IS_E1H(bp))
6053 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6054
6055 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6056 msleep(30);
6057 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6058
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006059 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006060 if (CHIP_IS_E1(bp)) {
6061 /* enable HW interrupt from PXP on USDM overflow
6062 bit 16 on INT_MASK_0 */
6063 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006064 }
6065
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006066 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006067 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006068
6069#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006070 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6071 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6072 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6073 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6074 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00006075 /* make sure this value is 0 */
6076 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006077
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006078/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6079 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6080 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6081 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6082 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006083#endif
6084
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006085 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
Michael Chan37b091b2009-10-10 13:46:55 +00006086#ifdef BCM_CNIC
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006087 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6088 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6089 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006090#endif
6091
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006092 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6093 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006094
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006095 /* let the HW do it's magic ... */
6096 msleep(100);
6097 /* finish PXP init */
6098 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6099 if (val != 1) {
6100 BNX2X_ERR("PXP2 CFG failed\n");
6101 return -EBUSY;
6102 }
6103 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6104 if (val != 1) {
6105 BNX2X_ERR("PXP2 RD_INIT failed\n");
6106 return -EBUSY;
6107 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006108
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006109 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6110 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006111
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006112 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006113
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006114 /* clean the DMAE memory */
6115 bp->dmae_ready = 1;
6116 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006117
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006118 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6119 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6120 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6121 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006122
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006123 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6124 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6125 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6126 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6127
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006128 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00006129
6130#ifdef BCM_CNIC
6131 wb_write[0] = 0;
6132 wb_write[1] = 0;
6133 for (i = 0; i < 64; i++) {
6134 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6135 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6136
6137 if (CHIP_IS_E1H(bp)) {
6138 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6139 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6140 wb_write, 2);
6141 }
6142 }
6143#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006144 /* soft reset pulse */
6145 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6146 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006147
Michael Chan37b091b2009-10-10 13:46:55 +00006148#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006149 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006150#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006151
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006152 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006153 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6154 if (!CHIP_REV_IS_SLOW(bp)) {
6155 /* enable hw interrupt from doorbell Q */
6156 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6157 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006158
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006159 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6160 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08006161 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00006162#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07006163 /* set NIC mode */
6164 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00006165#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006166 if (CHIP_IS_E1H(bp))
6167 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006168
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006169 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6170 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6171 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6172 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006173
Eilon Greensteinca003922009-08-12 22:53:28 -07006174 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6175 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6176 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6177 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006178
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006179 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6180 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6181 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6182 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006183
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006184 /* sync semi rtc */
6185 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6186 0x80000000);
6187 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6188 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006189
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006190 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6191 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6192 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006193
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006194 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6195 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6196 REG_WR(bp, i, 0xc0cac01a);
6197 /* TODO: replace with something meaningful */
6198 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006199 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00006200#ifdef BCM_CNIC
6201 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6202 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6203 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6204 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6205 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6206 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6207 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6208 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6209 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6210 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6211#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006212 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006213
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006214 if (sizeof(union cdu_context) != 1024)
6215 /* we currently assume that a context is 1024 bytes */
6216 printk(KERN_ALERT PFX "please adjust the size of"
6217 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006218
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006219 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006220 val = (4 << 24) + (0 << 12) + 1024;
6221 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006222
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006223 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006224 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08006225 /* enable context validation interrupt from CFC */
6226 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6227
6228 /* set the thresholds to prevent CFC/CDU race */
6229 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006230
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006231 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6232 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006233
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006234 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006235 /* Reset PCIE errors for debug */
6236 REG_WR(bp, 0x2814, 0xffffffff);
6237 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006238
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006239 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006240 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006241 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006242 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006243
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006244 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006245 if (CHIP_IS_E1H(bp)) {
6246 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6247 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6248 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006249
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006250 if (CHIP_REV_IS_SLOW(bp))
6251 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006252
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006253 /* finish CFC init */
6254 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6255 if (val != 1) {
6256 BNX2X_ERR("CFC LL_INIT failed\n");
6257 return -EBUSY;
6258 }
6259 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6260 if (val != 1) {
6261 BNX2X_ERR("CFC AC_INIT failed\n");
6262 return -EBUSY;
6263 }
6264 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6265 if (val != 1) {
6266 BNX2X_ERR("CFC CAM_INIT failed\n");
6267 return -EBUSY;
6268 }
6269 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006270
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006271 /* read NIG statistic
6272 to see if this is our first up since powerup */
6273 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6274 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006275
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006276 /* do internal memory self test */
6277 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6278 BNX2X_ERR("internal mem self test failed\n");
6279 return -EBUSY;
6280 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006281
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006282 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
Eilon Greenstein46c6a672009-02-12 08:36:58 +00006283 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6284 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6285 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006286 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eilon Greenstein46c6a672009-02-12 08:36:58 +00006287 bp->port.need_hw_lock = 1;
6288 break;
6289
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006290 default:
6291 break;
6292 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08006293
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00006294 bnx2x_setup_fan_failure_detection(bp);
6295
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006296 /* clear PXP2 attentions */
6297 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006298
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006299 enable_blocks_attention(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006300
Yaniv Rosner6bbca912008-08-13 15:57:28 -07006301 if (!BP_NOMCP(bp)) {
6302 bnx2x_acquire_phy_lock(bp);
6303 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6304 bnx2x_release_phy_lock(bp);
6305 } else
6306 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6307
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006308 return 0;
6309}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006310
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006311static int bnx2x_init_port(struct bnx2x *bp)
6312{
6313 int port = BP_PORT(bp);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006314 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
Eilon Greenstein1c063282009-02-12 08:36:43 +00006315 u32 low, high;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006316 u32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006317
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006318 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6319
6320 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006321
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006322 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006323 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07006324
6325 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6326 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6327 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006328 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006329
Michael Chan37b091b2009-10-10 13:46:55 +00006330#ifdef BCM_CNIC
6331 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006332
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006333 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
Michael Chan37b091b2009-10-10 13:46:55 +00006334 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6335 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006336#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006337 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00006338
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006339 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00006340 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6341 /* no pause for emulation and FPGA */
6342 low = 0;
6343 high = 513;
6344 } else {
6345 if (IS_E1HMF(bp))
6346 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6347 else if (bp->dev->mtu > 4096) {
6348 if (bp->flags & ONE_PORT_FLAG)
6349 low = 160;
6350 else {
6351 val = bp->dev->mtu;
6352 /* (24*1024 + val*4)/256 */
6353 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6354 }
6355 } else
6356 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6357 high = low + 56; /* 14*1024/256 */
6358 }
6359 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6360 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6361
6362
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006363 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07006364
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006365 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006366 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006367 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006368 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006369
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006370 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6371 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6372 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6373 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006374
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006375 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006376 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006377
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006378 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006379
6380 /* configure PBF to work without PAUSE mtu 9000 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006381 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006382
6383 /* update threshold */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006384 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006385 /* update init credit */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006386 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006387
6388 /* probe changes */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006389 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006390 msleep(5);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006391 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006392
Michael Chan37b091b2009-10-10 13:46:55 +00006393#ifdef BCM_CNIC
6394 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006395#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006396 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006397 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006398
6399 if (CHIP_IS_E1(bp)) {
6400 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6401 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6402 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006403 bnx2x_init_block(bp, HC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006404
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006405 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006406 /* init aeu_mask_attn_func_0/1:
6407 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6408 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6409 * bits 4-7 are used for "per vn group attention" */
6410 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6411 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6412
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006413 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006414 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006415 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006416 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006417 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006418
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006419 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006420
6421 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6422
6423 if (CHIP_IS_E1H(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006424 /* 0x2 disable e1hov, 0x1 enable */
6425 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6426 (IS_E1HMF(bp) ? 0x1 : 0x2));
6427
Eilon Greenstein1c063282009-02-12 08:36:43 +00006428 {
6429 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6430 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6431 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6432 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006433 }
6434
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006435 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006436 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006437
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006438 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
Eilon Greenstein589abe32009-02-12 08:36:55 +00006439 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6440 {
6441 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6442
6443 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6444 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6445
6446 /* The GPIO should be swapped if the swap register is
6447 set and active */
6448 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6449 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6450
6451 /* Select function upon port-swap configuration */
6452 if (port == 0) {
6453 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6454 aeu_gpio_mask = (swap_val && swap_override) ?
6455 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6456 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6457 } else {
6458 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6459 aeu_gpio_mask = (swap_val && swap_override) ?
6460 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6461 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6462 }
6463 val = REG_RD(bp, offset);
6464 /* add GPIO3 to group */
6465 val |= aeu_gpio_mask;
6466 REG_WR(bp, offset, val);
6467 }
6468 break;
6469
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006470 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006471 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eliezer Tamirf1410642008-02-28 11:51:50 -08006472 /* add SPIO 5 to group 0 */
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006473 {
6474 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6475 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6476 val = REG_RD(bp, reg_addr);
Eliezer Tamirf1410642008-02-28 11:51:50 -08006477 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006478 REG_WR(bp, reg_addr, val);
6479 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08006480 break;
6481
6482 default:
6483 break;
6484 }
6485
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006486 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006487
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006488 return 0;
6489}
6490
/* Each function owns half of the 768 ILT (Internal Lookup Table) lines */
#define ILT_PER_FUNC	(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
/* pack a first/last ILT line pair into one register value (last in
 * bits 10+, first in the low bits); PXP_ONE_ILT is the single-line case */
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#ifdef BCM_CNIC
/* extra ILT lines reserved for the CNIC (offload) context when built in */
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006509
6510static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6511{
6512 int reg;
6513
6514 if (CHIP_IS_E1H(bp))
6515 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6516 else /* E1 */
6517 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6518
6519 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6520}
6521
/* Per-function hardware init (third init stage, after common and port).
 * Programs this function's ILT lines, optional CNIC tables, per-function
 * init-tool blocks (E1H only) and the host coalescing block.
 * Returns 0 (kept int for symmetry with the other init stages). */
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	/* first ILT line owned by this function */
	i = FUNC_ILT_BASE(func);

	/* CDU (context) ILT range: line i .. i + CNIC_ILT_LINES */
	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	/* one ILT line each for the timers, QM and searcher (T1) tables */
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	/* last free T2 element: 64 bytes before the end of the 16K table */
	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		/* per-function stage of the init tool for all CM/SEM blocks */
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		/* enable this function in the NIG and set its outer VLAN */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
6618
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006619static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6620{
6621 int i, rc = 0;
6622
6623 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6624 BP_FUNC(bp), load_code);
6625
6626 bp->dmae_ready = 0;
6627 mutex_init(&bp->dmae_mutex);
Eilon Greenstein54016b22009-08-12 08:23:48 +00006628 rc = bnx2x_gunzip_init(bp);
6629 if (rc)
6630 return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006631
6632 switch (load_code) {
6633 case FW_MSG_CODE_DRV_LOAD_COMMON:
6634 rc = bnx2x_init_common(bp);
6635 if (rc)
6636 goto init_hw_err;
6637 /* no break */
6638
6639 case FW_MSG_CODE_DRV_LOAD_PORT:
6640 bp->dmae_ready = 1;
6641 rc = bnx2x_init_port(bp);
6642 if (rc)
6643 goto init_hw_err;
6644 /* no break */
6645
6646 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6647 bp->dmae_ready = 1;
6648 rc = bnx2x_init_func(bp);
6649 if (rc)
6650 goto init_hw_err;
6651 break;
6652
6653 default:
6654 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6655 break;
6656 }
6657
6658 if (!BP_NOMCP(bp)) {
6659 int func = BP_FUNC(bp);
6660
6661 bp->fw_drv_pulse_wr_seq =
6662 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6663 DRV_PULSE_SEQ_MASK);
Eilon Greenstein6fe49bb2009-08-12 08:23:17 +00006664 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6665 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006666
6667 /* this needs to be done before gunzip end */
6668 bnx2x_zero_def_sb(bp);
6669 for_each_queue(bp, i)
6670 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
Michael Chan37b091b2009-10-10 13:46:55 +00006671#ifdef BCM_CNIC
6672 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6673#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006674
6675init_hw_err:
6676 bnx2x_gunzip_end(bp);
6677
6678 return rc;
6679}
6680
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006681static void bnx2x_free_mem(struct bnx2x *bp)
6682{
6683
6684#define BNX2X_PCI_FREE(x, y, size) \
6685 do { \
6686 if (x) { \
6687 pci_free_consistent(bp->pdev, size, x, y); \
6688 x = NULL; \
6689 y = 0; \
6690 } \
6691 } while (0)
6692
6693#define BNX2X_FREE(x) \
6694 do { \
6695 if (x) { \
6696 vfree(x); \
6697 x = NULL; \
6698 } \
6699 } while (0)
6700
6701 int i;
6702
6703 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006704 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006705 for_each_queue(bp, i) {
6706
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006707 /* status blocks */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006708 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6709 bnx2x_fp(bp, i, status_blk_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006710 sizeof(struct host_status_block));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006711 }
6712 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006713 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006714
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006715 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006716 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6717 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6718 bnx2x_fp(bp, i, rx_desc_mapping),
6719 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6720
6721 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6722 bnx2x_fp(bp, i, rx_comp_mapping),
6723 sizeof(struct eth_fast_path_rx_cqe) *
6724 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006725
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006726 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07006727 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006728 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6729 bnx2x_fp(bp, i, rx_sge_mapping),
6730 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6731 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006732 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006733 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006734
6735 /* fastpath tx rings: tx_buf tx_desc */
6736 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6737 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6738 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006739 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006740 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006741 /* end of fastpath */
6742
6743 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006744 sizeof(struct host_def_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006745
6746 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006747 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006748
Michael Chan37b091b2009-10-10 13:46:55 +00006749#ifdef BCM_CNIC
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006750 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6751 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6752 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6753 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
Michael Chan37b091b2009-10-10 13:46:55 +00006754 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6755 sizeof(struct host_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006756#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006757 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006758
6759#undef BNX2X_PCI_FREE
6760#undef BNX2X_KFREE
6761}
6762
/* Allocate all driver memory: per-queue status blocks, Rx/Tx rings
 * (DMA-coherent for hardware rings, vmalloc for host-only shadow rings),
 * the default status block, slowpath area, optional CNIC tables and the
 * slowpath (SPQ) ring.  Both helper macros jump to alloc_mem_err on
 * failure, where everything allocated so far is released.
 * Returns 0 on success, -ENOMEM on any allocation failure. */
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		/* give each fastpath a back-pointer to the main structure */
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	  (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	/* chain the 64-byte elements: each one's last u64 points at the
	   physical address of the next element */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6868
6869static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6870{
6871 int i;
6872
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006873 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006874 struct bnx2x_fastpath *fp = &bp->fp[i];
6875
6876 u16 bd_cons = fp->tx_bd_cons;
6877 u16 sw_prod = fp->tx_pkt_prod;
6878 u16 sw_cons = fp->tx_pkt_cons;
6879
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006880 while (sw_cons != sw_prod) {
6881 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6882 sw_cons++;
6883 }
6884 }
6885}
6886
6887static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6888{
6889 int i, j;
6890
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006891 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006892 struct bnx2x_fastpath *fp = &bp->fp[j];
6893
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006894 for (i = 0; i < NUM_RX_BD; i++) {
6895 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6896 struct sk_buff *skb = rx_buf->skb;
6897
6898 if (skb == NULL)
6899 continue;
6900
6901 pci_unmap_single(bp->pdev,
6902 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein356e2382009-02-12 08:38:32 +00006903 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006904
6905 rx_buf->skb = NULL;
6906 dev_kfree_skb(skb);
6907 }
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006908 if (!fp->disable_tpa)
Eilon Greenstein32626232008-08-13 15:51:07 -07006909 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6910 ETH_MAX_AGGREGATION_QUEUES_E1 :
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006911 ETH_MAX_AGGREGATION_QUEUES_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006912 }
6913}
6914
/* Release all skbs held by the driver, Tx rings first then Rx rings. */
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
6920
/* Release all MSI-X vectors: entry 0 is the slowpath vector (bound to
 * the netdev), then one vector per fastpath queue at table offset
 * 'offset' (bumped past the CNIC vector when BCM_CNIC is built in). */
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	/* skip the CNIC vector at table index 1 - not freed here */
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}
6940
6941static void bnx2x_free_irq(struct bnx2x *bp)
6942{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006943 if (bp->flags & USING_MSIX_FLAG) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006944 bnx2x_free_msix_irqs(bp);
6945 pci_disable_msix(bp->pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006946 bp->flags &= ~USING_MSIX_FLAG;
6947
Eilon Greenstein8badd272009-02-12 08:36:15 +00006948 } else if (bp->flags & USING_MSI_FLAG) {
6949 free_irq(bp->pdev->irq, bp->dev);
6950 pci_disable_msi(bp->pdev);
6951 bp->flags &= ~USING_MSI_FLAG;
6952
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006953 } else
6954 free_irq(bp->pdev->irq, bp->dev);
6955}
6956
/* Populate the MSI-X table and enable MSI-X: entry 0 gets IGU vector 0
 * (slowpath), the optional CNIC entry comes next, then one entry per
 * fastpath queue with IGU vectors based at BP_L_ID().
 * Sets USING_MSIX_FLAG and returns 0 on success, or the
 * pci_enable_msix() error code on failure. */
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
6989
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006990static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6991{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006992 int i, rc, offset = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006993
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006994 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6995 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006996 if (rc) {
6997 BNX2X_ERR("request sp irq failed\n");
6998 return -EBUSY;
6999 }
7000
Michael Chan37b091b2009-10-10 13:46:55 +00007001#ifdef BCM_CNIC
7002 offset++;
7003#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007004 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007005 struct bnx2x_fastpath *fp = &bp->fp[i];
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007006 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7007 bp->dev->name, i);
Eilon Greensteinca003922009-08-12 22:53:28 -07007008
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007009 rc = request_irq(bp->msix_table[i + offset].vector,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007010 bnx2x_msix_fp_int, 0, fp->name, fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007011 if (rc) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007012 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007013 bnx2x_free_msix_irqs(bp);
7014 return -EBUSY;
7015 }
7016
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007017 fp->state = BNX2X_FP_STATE_IRQ;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007018 }
7019
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007020 i = BNX2X_NUM_QUEUES(bp);
Eilon Greensteinca003922009-08-12 22:53:28 -07007021 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7022 " ... fp[%d] %d\n",
7023 bp->dev->name, bp->msix_table[0].vector,
7024 0, bp->msix_table[offset].vector,
7025 i - 1, bp->msix_table[offset + i - 1].vector);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007026
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007027 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007028}
7029
Eilon Greenstein8badd272009-02-12 08:36:15 +00007030static int bnx2x_enable_msi(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007031{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007032 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007033
Eilon Greenstein8badd272009-02-12 08:36:15 +00007034 rc = pci_enable_msi(bp->pdev);
7035 if (rc) {
7036 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7037 return -1;
7038 }
7039 bp->flags |= USING_MSI_FLAG;
7040
7041 return 0;
7042}
7043
7044static int bnx2x_req_irq(struct bnx2x *bp)
7045{
7046 unsigned long flags;
7047 int rc;
7048
7049 if (bp->flags & USING_MSI_FLAG)
7050 flags = 0;
7051 else
7052 flags = IRQF_SHARED;
7053
7054 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007055 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007056 if (!rc)
7057 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7058
7059 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007060}
7061
Yitchak Gertner65abd742008-08-25 15:26:24 -07007062static void bnx2x_napi_enable(struct bnx2x *bp)
7063{
7064 int i;
7065
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007066 for_each_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007067 napi_enable(&bnx2x_fp(bp, i, napi));
7068}
7069
7070static void bnx2x_napi_disable(struct bnx2x *bp)
7071{
7072 int i;
7073
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007074 for_each_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007075 napi_disable(&bnx2x_fp(bp, i, napi));
7076}
7077
/* Re-enable the datapath after bnx2x_netif_stop(): drop our reference
 * on the interrupt semaphore and, only if it reached zero (no other
 * blocker remains), turn NAPI and device interrupts back on and wake
 * the Tx queues when the device is fully OPEN. */
static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	/* true when the semaphore hit zero - interrupts may be enabled */
	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}
7094
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007095static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007096{
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007097 bnx2x_int_disable_sync(bp, disable_hw);
Eilon Greensteine94d8af2009-01-22 03:37:36 +00007098 bnx2x_napi_disable(bp);
Eilon Greenstein762d5f62009-03-02 07:59:56 +00007099 netif_tx_disable(bp->dev);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007100}
7101
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007102/*
7103 * Init service functions
7104 */
7105
Michael Chane665bfd2009-10-10 13:46:54 +00007106/**
7107 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7108 *
7109 * @param bp driver descriptor
7110 * @param set set or clear an entry (1 or 0)
7111 * @param mac pointer to a buffer containing a MAC
7112 * @param cl_bit_vec bit vector of clients to register a MAC for
7113 * @param cam_offset offset in a CAM to use
7114 * @param with_bcast set broadcast MAC as well
7115 */
7116static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7117 u32 cl_bit_vec, u8 cam_offset,
7118 u8 with_bcast)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007119{
7120 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007121 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007122
7123 /* CAM allocation
7124 * unicasts 0-31:port0 32-63:port1
7125 * multicast 64-127:port0 128-191:port1
7126 */
Michael Chane665bfd2009-10-10 13:46:54 +00007127 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7128 config->hdr.offset = cam_offset;
7129 config->hdr.client_id = 0xff;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007130 config->hdr.reserved1 = 0;
7131
7132 /* primary MAC */
7133 config->config_table[0].cam_entry.msb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00007134 swab16(*(u16 *)&mac[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007135 config->config_table[0].cam_entry.middle_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00007136 swab16(*(u16 *)&mac[2]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007137 config->config_table[0].cam_entry.lsb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00007138 swab16(*(u16 *)&mac[4]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007139 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007140 if (set)
7141 config->config_table[0].target_table_entry.flags = 0;
7142 else
7143 CAM_INVALIDATE(config->config_table[0]);
Eilon Greensteinca003922009-08-12 22:53:28 -07007144 config->config_table[0].target_table_entry.clients_bit_vector =
Michael Chane665bfd2009-10-10 13:46:54 +00007145 cpu_to_le32(cl_bit_vec);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007146 config->config_table[0].target_table_entry.vlan_id = 0;
7147
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007148 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7149 (set ? "setting" : "clearing"),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007150 config->config_table[0].cam_entry.msb_mac_addr,
7151 config->config_table[0].cam_entry.middle_mac_addr,
7152 config->config_table[0].cam_entry.lsb_mac_addr);
7153
7154 /* broadcast */
Michael Chane665bfd2009-10-10 13:46:54 +00007155 if (with_bcast) {
7156 config->config_table[1].cam_entry.msb_mac_addr =
7157 cpu_to_le16(0xffff);
7158 config->config_table[1].cam_entry.middle_mac_addr =
7159 cpu_to_le16(0xffff);
7160 config->config_table[1].cam_entry.lsb_mac_addr =
7161 cpu_to_le16(0xffff);
7162 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7163 if (set)
7164 config->config_table[1].target_table_entry.flags =
7165 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7166 else
7167 CAM_INVALIDATE(config->config_table[1]);
7168 config->config_table[1].target_table_entry.clients_bit_vector =
7169 cpu_to_le32(cl_bit_vec);
7170 config->config_table[1].target_table_entry.vlan_id = 0;
7171 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007172
7173 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7174 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7175 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7176}
7177
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	/* a single CAM entry; unlike E1 there is no broadcast entry here */
	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC - each 16-bit half is byte-swapped for the HW */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		/* clearing: request entry invalidation via the action type */
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	/* post the SET_MAC ramrod; this function does not wait - callers
	 * track completion via bp->set_mac_pending / bnx2x_wait_ramrod()
	 */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
7225
/* Wait for *state_p (updated from the ramrod completion path by
 * bnx2x_sp_event()) to become @state.
 *
 * @param bp driver descriptor
 * @param state the value *state_p must reach
 * @param idx fastpath queue index the ramrod completion may arrive on
 * @param state_p state variable to watch
 * @param poll if non-zero, actively drain the RX rings for the
 *             completion instead of relying on interrupts
 *
 * @return 0 on success, -EIO if bp->panic is raised while waiting,
 *         -EBUSY on timeout (~5000 iterations with 1ms sleeps)
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		/* bail out early if the driver paniced meanwhile */
		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
7270
/* Program (set=1) or clear (set=0) the device's primary Ethernet MAC
 * in the E1H CAM and wait for the SET_MAC ramrod to complete.
 */
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	/* mark a SET_MAC ramrod as pending; the completion path
	 * (bnx2x_sp_event()) is expected to bring it back to 0
	 */
	bp->set_mac_pending++;
	smp_wmb(); /* make the counter visible before posting the ramrod */

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}
7282
7283static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7284{
7285 bp->set_mac_pending++;
7286 smp_wmb();
7287
7288 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7289 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7290 1);
7291
7292 /* Wait for a completion */
7293 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7294}
7295
#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return always 0; the ramrod completion is awaited but its status
 *         is not propagated to the caller
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	/* mark a SET_MAC ramrod as pending; the completion path
	 * (bnx2x_sp_event()) is expected to bring it back to 0
	 */
	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		/* iSCSI entry follows the ETH MAC + broadcast entries
		 * (unicast base 0/32 by port, hence the +2)
		 */
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
				  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
				  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif
7333
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007334static int bnx2x_setup_leading(struct bnx2x *bp)
7335{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007336 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007337
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007338 /* reset IGU state */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007339 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007340
7341 /* SETUP ramrod */
7342 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7343
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007344 /* Wait for completion */
7345 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007346
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007347 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007348}
7349
7350static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7351{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007352 struct bnx2x_fastpath *fp = &bp->fp[index];
7353
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007354 /* reset IGU state */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007355 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007356
Eliezer Tamir228241e2008-02-28 11:56:57 -08007357 /* SETUP ramrod */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007358 fp->state = BNX2X_FP_STATE_OPENING;
7359 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7360 fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007361
7362 /* Wait for completion */
7363 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007364 &(fp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007365}
7366
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007367static int bnx2x_poll(struct napi_struct *napi, int budget);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007368
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007369static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007370{
Eilon Greensteinca003922009-08-12 22:53:28 -07007371
7372 switch (bp->multi_mode) {
7373 case ETH_RSS_MODE_DISABLED:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007374 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007375 break;
7376
7377 case ETH_RSS_MODE_REGULAR:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007378 if (num_queues)
7379 bp->num_queues = min_t(u32, num_queues,
7380 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07007381 else
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007382 bp->num_queues = min_t(u32, num_online_cpus(),
7383 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07007384 break;
7385
7386
7387 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007388 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007389 break;
7390 }
Eilon Greensteinca003922009-08-12 22:53:28 -07007391}
7392
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007393static int bnx2x_set_num_queues(struct bnx2x *bp)
Eilon Greensteinca003922009-08-12 22:53:28 -07007394{
7395 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007396
Eilon Greenstein8badd272009-02-12 08:36:15 +00007397 switch (int_mode) {
7398 case INT_MODE_INTx:
7399 case INT_MODE_MSI:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007400 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007401 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
Eilon Greenstein8badd272009-02-12 08:36:15 +00007402 break;
7403
7404 case INT_MODE_MSIX:
7405 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007406 /* Set number of queues according to bp->multi_mode value */
7407 bnx2x_set_num_queues_msix(bp);
Eilon Greensteinca003922009-08-12 22:53:28 -07007408
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007409 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7410 bp->num_queues);
Eilon Greensteinca003922009-08-12 22:53:28 -07007411
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007412 /* if we can't use MSI-X we only need one fp,
7413 * so try to enable MSI-X with the requested number of fp's
7414 * and fallback to MSI or legacy INTx with one fp
7415 */
Eilon Greensteinca003922009-08-12 22:53:28 -07007416 rc = bnx2x_enable_msix(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007417 if (rc)
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007418 /* failed to enable MSI-X */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007419 bp->num_queues = 1;
Eilon Greenstein8badd272009-02-12 08:36:15 +00007420 break;
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007421 }
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007422 bp->dev->real_num_tx_queues = bp->num_queues;
Eilon Greensteinca003922009-08-12 22:53:28 -07007423 return rc;
Eilon Greenstein8badd272009-02-12 08:36:15 +00007424}
7425
Michael Chan993ac7b2009-10-10 13:46:56 +00007426#ifdef BCM_CNIC
7427static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7428static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7429#endif
Eilon Greenstein8badd272009-02-12 08:36:15 +00007430
/* must be called with rtnl_lock */
/*
 * Bring the NIC up: size the queues, allocate memory, request IRQs,
 * negotiate the load type with the MCP (or derive it from the static
 * load counters when there is no MCP), initialize the HW and firmware
 * internals, set up the leading and non-default connections, program
 * the MAC(s) and start the fast path according to @load_mode.
 *
 * @param bp driver descriptor
 * @param load_mode LOAD_NORMAL / LOAD_OPEN / LOAD_DIAG - controls how
 *                  the TX queues and RX filter are started
 *
 * @return 0 on success; negative errno on failure, with all acquired
 *         resources unwound through the load_error* labels
 */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* rc is intentionally not checked here: on MSI-X failure the
	 * non-MSIX branch below inspects it to pick the MSI/INTx fallback
	 */
	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_num_queues()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		/* no MCP: emulate its decision with static load counters */
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	/* first driver on the port (or chip) becomes the PMF */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* advertise DCC capabilities to the MCP via shmem2, if present */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
			bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
				      CNIC_SB_ID(bp));
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif

	return 0;

	/* error unwinding: each label releases what was acquired after
	 * the previous one, in reverse order of acquisition
	 */
#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
7676
7677static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7678{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007679 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007680 int rc;
7681
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007682 /* halt the connection */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007683 fp->state = BNX2X_FP_STATE_HALTING;
7684 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007685
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007686 /* Wait for completion */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007687 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007688 &(fp->state), 1);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007689 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007690 return rc;
7691
7692 /* delete cfc entry */
7693 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7694
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007695 /* Wait for completion */
7696 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007697 &(fp->state), 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007698 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007699}
7700
/* Tear down the leading (default) connection: HALT it, then post the
 * PORT_DELETE ramrod and watch the default status block's slow-path
 * producer for the completion (up to ~500ms).
 *
 * @return 0 on success, -EBUSY if the PORT_DELETE completion never
 *         shows up (the chip is about to be reset anyway), or the
 *         bnx2x_wait_ramrod() error from the HALT stage
 */
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	/* snapshot the producer so we can detect the PORT_DEL completion */
	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
7750
/* Per-function HW reset: mask the function's IGU edge registers,
 * (with CNIC) stop the timers scan and wait for it to drain, then
 * clear the function's ILT range.
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
7779
/* Per-port HW reset: mask NIG interrupts, stop packet reception into
 * the BRB, clear the port's AEU mask, then verify the BRB drained.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* give in-flight packets time to drain before checking */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
7805
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007806static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7807{
7808 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7809 BP_FUNC(bp), reset_code);
7810
7811 switch (reset_code) {
7812 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7813 bnx2x_reset_port(bp);
7814 bnx2x_reset_func(bp);
7815 bnx2x_reset_common(bp);
7816 break;
7817
7818 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7819 bnx2x_reset_port(bp);
7820 bnx2x_reset_func(bp);
7821 break;
7822
7823 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7824 bnx2x_reset_func(bp);
7825 break;
7826
7827 default:
7828 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7829 break;
7830 }
7831}
7832
/* must be called with rtnl_lock */
/*
 * bnx2x_nic_unload - bring the NIC down.
 *
 * Stops traffic, tears down fastpath queues via ramrods, negotiates the
 * reset scope with the MCP (management firmware) and resets the chip.
 * @unload_mode selects WoL behaviour (UNLOAD_NORMAL disables WoL).
 * Returns 0 on success, -EBUSY only when BNX2X_STOP_ON_ERROR is set.
 *
 * NOTE(review): the ordering below (rx-mode drop -> netif stop -> timer ->
 * IRQ free -> tx drain -> MAC/CAM cleanup -> ramrods -> MCP handshake ->
 * chip reset -> free resources) is a HW/FW protocol; do not reorder.
 */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

#ifdef BCM_CNIC
	/* tell the CNIC (iSCSI/FCoE offload) driver we are going down */
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	/* keep the MCP pulse alive marker so FW doesn't declare us dead */
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* poll-drain the tx ring; up to ~1000 x 1ms per queue */
		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				/* best effort - continue teardown anyway */
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		/* E1: clear unicast MAC and invalidate the multicast CAM */
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		/* ordering: bump pending counter before posting the ramrod */
		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		/* disable the per-function LLH and clear the MC hash */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	/* choose the WoL request code sent to the MCP */
	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		/* program the EMAC MAC-match registers so the HW can match
		 * wake-up frames while the host is suspended */
		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

	/* NOTE: the success path falls through into this label on purpose -
	 * the MCP handshake and chip reset run on both paths */
unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		/* no MCP: emulate its arbitration with local load counters to
		 * decide how wide a reset we may perform */
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	/* last driver on the port/chip also takes the link down */
	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
8016
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008017static void bnx2x_reset_task(struct work_struct *work)
8018{
8019 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8020
8021#ifdef BNX2X_STOP_ON_ERROR
8022 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8023 " so reset not done to allow debug dump,\n"
Joe Perchesad361c92009-07-06 13:05:40 -07008024 " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008025 return;
8026#endif
8027
8028 rtnl_lock();
8029
8030 if (!netif_running(bp->dev))
8031 goto reset_task_exit;
8032
8033 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8034 bnx2x_nic_load(bp, LOAD_NORMAL);
8035
8036reset_task_exit:
8037 rtnl_unlock();
8038}
8039
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008040/* end of nic load/unload */
8041
8042/* ethtool_ops */
8043
8044/*
8045 * Init service functions
8046 */
8047
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00008048static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8049{
8050 switch (func) {
8051 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8052 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8053 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8054 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8055 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8056 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8057 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8058 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8059 default:
8060 BNX2X_ERR("Unsupported function index: %d\n", func);
8061 return (u32)(-1);
8062 }
8063}
8064
/* Disable interrupts on an E1H chip by temporarily "pretending" to be
 * function 0: the PGL pretend register redirects our GRC accesses so the
 * E1-style bnx2x_int_disable() sequence hits the right function.
 * BUG()s if the pretend register does not take the written value, since
 * continuing would disable interrupts on the wrong function.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
8097
/* Chip-aware interrupt disable: E1 can be disabled directly, E1H needs
 * the function-pretend dance in bnx2x_undi_int_disable_e1h().
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp)) {
		bnx2x_int_disable(bp);
		return;
	}

	bnx2x_undi_int_disable_e1h(bp, func);
}
8105
/*
 * bnx2x_undi_unload - take the device over from a pre-OS UNDI (PXE) driver.
 *
 * If the chip is still "unprepared" and the doorbell CID offset carries the
 * UNDI signature (0x7), unload UNDI via the MCP on both ports, quiesce
 * input traffic, reset the device and restore our function/fw sequence.
 * Runs at probe time (__devinit), before normal initialization.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			/* fw_seq must track the MCP mailbox sequence number
			 * of the function we are impersonating */
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
			       NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
			       NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			       MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			/* NOTE(review): 0xd3ffffff / 0x1403 select which HW
			 * blocks are reset - values come from the HW spec */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
8204
8205static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8206{
8207 u32 val, val2, val3, val4, id;
Eilon Greenstein72ce58c2008-08-13 15:52:46 -07008208 u16 pmc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008209
8210 /* Get the chip revision id and number. */
8211 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8212 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8213 id = ((val & 0xffff) << 16);
8214 val = REG_RD(bp, MISC_REG_CHIP_REV);
8215 id |= ((val & 0xf) << 12);
8216 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8217 id |= ((val & 0xff) << 4);
Eilon Greenstein5a40e082009-01-14 06:44:04 +00008218 val = REG_RD(bp, MISC_REG_BOND_ID);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008219 id |= (val & 0xf);
8220 bp->common.chip_id = id;
8221 bp->link_params.chip_id = bp->common.chip_id;
8222 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8223
Eilon Greenstein1c063282009-02-12 08:36:43 +00008224 val = (REG_RD(bp, 0x2874) & 0x55);
8225 if ((bp->common.chip_id & 0x1) ||
8226 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8227 bp->flags |= ONE_PORT_FLAG;
8228 BNX2X_DEV_INFO("single port device\n");
8229 }
8230
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008231 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8232 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8233 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8234 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8235 bp->common.flash_size, bp->common.flash_size);
8236
8237 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
Eilon Greenstein2691d512009-08-12 08:22:08 +00008238 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008239 bp->link_params.shmem_base = bp->common.shmem_base;
Eilon Greenstein2691d512009-08-12 08:22:08 +00008240 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8241 bp->common.shmem_base, bp->common.shmem2_base);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008242
8243 if (!bp->common.shmem_base ||
8244 (bp->common.shmem_base < 0xA0000) ||
8245 (bp->common.shmem_base >= 0xC0000)) {
8246 BNX2X_DEV_INFO("MCP not active\n");
8247 bp->flags |= NO_MCP_FLAG;
8248 return;
8249 }
8250
8251 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8252 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8253 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8254 BNX2X_ERR("BAD MCP validity signature\n");
8255
8256 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00008257 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008258
8259 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8260 SHARED_HW_CFG_LED_MODE_MASK) >>
8261 SHARED_HW_CFG_LED_MODE_SHIFT);
8262
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00008263 bp->link_params.feature_config_flags = 0;
8264 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8265 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8266 bp->link_params.feature_config_flags |=
8267 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8268 else
8269 bp->link_params.feature_config_flags &=
8270 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8271
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008272 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8273 bp->common.bc_ver = val;
8274 BNX2X_DEV_INFO("bc_ver %X\n", val);
8275 if (val < BNX2X_BC_VER) {
8276 /* for now only warn
8277 * later we might need to enforce this */
8278 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8279 " please upgrade BC\n", BNX2X_BC_VER, val);
8280 }
Eilon Greenstein4d295db2009-07-21 05:47:47 +00008281 bp->link_params.feature_config_flags |=
8282 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8283 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
Eilon Greenstein72ce58c2008-08-13 15:52:46 -07008284
8285 if (BP_E1HVN(bp) == 0) {
8286 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8287 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8288 } else {
8289 /* no WOL capability for E1HVN != 0 */
8290 bp->flags |= NO_WOL_FLAG;
8291 }
8292 BNX2X_DEV_INFO("%sWoL capable\n",
Eilon Greensteinf5372252009-02-12 08:38:30 +00008293 (bp->flags & NO_WOL_FLAG) ? "not " : "");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008294
8295 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8296 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8297 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8298 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8299
8300 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8301 val, val2, val3, val4);
8302}
8303
/*
 * bnx2x_link_settings_supported - build bp->port.supported from NVRAM config.
 *
 * Based on switch_cfg (1G SerDes vs 10G XGXS) and the external PHY type
 * from bp->link_params.ext_phy_config, ORs the ethtool SUPPORTED_* bits
 * for that PHY into bp->port.supported, reads the PHY MDIO address from
 * the NIG, and finally masks the result by the NVRAM speed capability
 * mask. Logs an error and returns early on invalid NVRAM settings.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* SerDes PHY MDIO address comes from the NIG strap regs */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			/* PHY declared itself failed in NVRAM; no caps set */
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* XGXS PHY MDIO address comes from the NIG strap regs */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
8540
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008541static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008542{
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008543 bp->link_params.req_duplex = DUPLEX_FULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008544
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008545 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008546 case PORT_FEATURE_LINK_SPEED_AUTO:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008547 if (bp->port.supported & SUPPORTED_Autoneg) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008548 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008549 bp->port.advertising = bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008550 } else {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008551 u32 ext_phy_type =
8552 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8553
8554 if ((ext_phy_type ==
8555 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8556 (ext_phy_type ==
8557 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008558 /* force 10G, no AN */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008559 bp->link_params.req_line_speed = SPEED_10000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008560 bp->port.advertising =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008561 (ADVERTISED_10000baseT_Full |
8562 ADVERTISED_FIBRE);
8563 break;
8564 }
8565 BNX2X_ERR("NVRAM config error. "
8566 "Invalid link_config 0x%x"
8567 " Autoneg not supported\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008568 bp->port.link_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008569 return;
8570 }
8571 break;
8572
8573 case PORT_FEATURE_LINK_SPEED_10M_FULL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008574 if (bp->port.supported & SUPPORTED_10baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008575 bp->link_params.req_line_speed = SPEED_10;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008576 bp->port.advertising = (ADVERTISED_10baseT_Full |
8577 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008578 } else {
8579 BNX2X_ERR("NVRAM config error. "
8580 "Invalid link_config 0x%x"
8581 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008582 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008583 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008584 return;
8585 }
8586 break;
8587
8588 case PORT_FEATURE_LINK_SPEED_10M_HALF:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008589 if (bp->port.supported & SUPPORTED_10baseT_Half) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008590 bp->link_params.req_line_speed = SPEED_10;
8591 bp->link_params.req_duplex = DUPLEX_HALF;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008592 bp->port.advertising = (ADVERTISED_10baseT_Half |
8593 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008594 } else {
8595 BNX2X_ERR("NVRAM config error. "
8596 "Invalid link_config 0x%x"
8597 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008598 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008599 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008600 return;
8601 }
8602 break;
8603
8604 case PORT_FEATURE_LINK_SPEED_100M_FULL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008605 if (bp->port.supported & SUPPORTED_100baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008606 bp->link_params.req_line_speed = SPEED_100;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008607 bp->port.advertising = (ADVERTISED_100baseT_Full |
8608 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008609 } else {
8610 BNX2X_ERR("NVRAM config error. "
8611 "Invalid link_config 0x%x"
8612 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008613 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008614 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008615 return;
8616 }
8617 break;
8618
8619 case PORT_FEATURE_LINK_SPEED_100M_HALF:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008620 if (bp->port.supported & SUPPORTED_100baseT_Half) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008621 bp->link_params.req_line_speed = SPEED_100;
8622 bp->link_params.req_duplex = DUPLEX_HALF;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008623 bp->port.advertising = (ADVERTISED_100baseT_Half |
8624 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008625 } else {
8626 BNX2X_ERR("NVRAM config error. "
8627 "Invalid link_config 0x%x"
8628 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008629 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008630 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008631 return;
8632 }
8633 break;
8634
8635 case PORT_FEATURE_LINK_SPEED_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008636 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008637 bp->link_params.req_line_speed = SPEED_1000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008638 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8639 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008640 } else {
8641 BNX2X_ERR("NVRAM config error. "
8642 "Invalid link_config 0x%x"
8643 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008644 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008645 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008646 return;
8647 }
8648 break;
8649
8650 case PORT_FEATURE_LINK_SPEED_2_5G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008651 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008652 bp->link_params.req_line_speed = SPEED_2500;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008653 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8654 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008655 } else {
8656 BNX2X_ERR("NVRAM config error. "
8657 "Invalid link_config 0x%x"
8658 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008659 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008660 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008661 return;
8662 }
8663 break;
8664
8665 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8666 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8667 case PORT_FEATURE_LINK_SPEED_10G_KR:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008668 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008669 bp->link_params.req_line_speed = SPEED_10000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008670 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8671 ADVERTISED_FIBRE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008672 } else {
8673 BNX2X_ERR("NVRAM config error. "
8674 "Invalid link_config 0x%x"
8675 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008676 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008677 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008678 return;
8679 }
8680 break;
8681
8682 default:
8683 BNX2X_ERR("NVRAM config error. "
8684 "BAD link speed link_config 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008685 bp->port.link_config);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008686 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008687 bp->port.advertising = bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008688 break;
8689 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008690
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008691 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8692 PORT_FEATURE_FLOW_CONTROL_MASK);
David S. Millerc0700f92008-12-16 23:53:20 -08008693 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
Randy Dunlap4ab84d42008-08-07 20:33:19 -07008694 !(bp->port.supported & SUPPORTED_Autoneg))
David S. Millerc0700f92008-12-16 23:53:20 -08008695 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008696
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008697 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
Eliezer Tamirf1410642008-02-28 11:51:50 -08008698 " advertising 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008699 bp->link_params.req_line_speed,
8700 bp->link_params.req_duplex,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008701 bp->link_params.req_flow_ctrl, bp->port.advertising);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008702}
8703
Michael Chane665bfd2009-10-10 13:46:54 +00008704static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8705{
8706 mac_hi = cpu_to_be16(mac_hi);
8707 mac_lo = cpu_to_be32(mac_lo);
8708 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8709 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8710}
8711
/* Read the per-port hardware configuration from shared memory (shmem)
 * into bp->link_params and bp->port: lane config, external PHY config,
 * speed capability mask, link config, per-lane XGXS rx/tx settings and
 * the WoL default.  Then derive the supported/requested link settings
 * and the MDIO PHY address, and load the port MAC address (plus the
 * iSCSI MAC when CNIC support is built in).
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	/* the NOC variant is handled as a plain 8727 with a feature flag */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	/* each shmem word packs two 16-bit lane values */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	/* port MAC address: upper 2 bytes / lower 4 bytes from shmem */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008803
/* Gather all hardware information for this function: common chip info,
 * E1H multi-function (MF) mode detection and the outer-VLAN (E1HOV) tag,
 * per-port info (when an MCP is present), the firmware sequence number,
 * and the function MAC address.
 *
 * Returns 0 on success or -EPERM on an inconsistent multi-function
 * configuration (missing E1HOV tag in MF mode, or a non-zero VN in
 * single-function mode).
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		/* MF mode is detected from function 0's E1HOV tag being
		 * configured (non-default)
		 */
		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			/* in MF mode this function must have its own tag */
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			/* single-function mode only makes sense on VN 0 */
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! VN %d in single function mode,"
					  " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		/* in MF mode the per-function MAC overrides the port MAC,
		 * when a valid (non-default) one is configured
		 */
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
8885
8886static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8887{
8888 int func = BP_FUNC(bp);
Eilon Greenstein87942b42009-02-12 08:36:49 +00008889 int timer_interval;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008890 int rc;
8891
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008892 /* Disable interrupt handling until HW is initialized */
8893 atomic_set(&bp->intr_sem, 1);
Eilon Greensteine1510702009-07-21 05:47:41 +00008894 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008895
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008896 mutex_init(&bp->port.phy_mutex);
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07008897 mutex_init(&bp->fw_mb_mutex);
Michael Chan993ac7b2009-10-10 13:46:56 +00008898#ifdef BCM_CNIC
8899 mutex_init(&bp->cnic_mutex);
8900#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008901
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08008902 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008903 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8904
8905 rc = bnx2x_get_hwinfo(bp);
8906
8907 /* need to reset chip if undi was active */
8908 if (!BP_NOMCP(bp))
8909 bnx2x_undi_unload(bp);
8910
8911 if (CHIP_REV_IS_FPGA(bp))
8912 printk(KERN_ERR PFX "FPGA detected\n");
8913
8914 if (BP_NOMCP(bp) && (func == 0))
8915 printk(KERN_ERR PFX
8916 "MCP disabled, must load devices in order!\n");
8917
Eilon Greenstein555f6c72009-02-12 08:36:11 +00008918 /* Set multi queue mode */
Eilon Greenstein8badd272009-02-12 08:36:15 +00008919 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8920 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00008921 printk(KERN_ERR PFX
Eilon Greenstein8badd272009-02-12 08:36:15 +00008922 "Multi disabled since int_mode requested is not MSI-X\n");
Eilon Greenstein555f6c72009-02-12 08:36:11 +00008923 multi_mode = ETH_RSS_MODE_DISABLED;
8924 }
8925 bp->multi_mode = multi_mode;
8926
8927
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07008928 /* Set TPA flags */
8929 if (disable_tpa) {
8930 bp->flags &= ~TPA_ENABLE_FLAG;
8931 bp->dev->features &= ~NETIF_F_LRO;
8932 } else {
8933 bp->flags |= TPA_ENABLE_FLAG;
8934 bp->dev->features |= NETIF_F_LRO;
8935 }
8936
Eilon Greensteina18f5122009-08-12 08:23:26 +00008937 if (CHIP_IS_E1(bp))
8938 bp->dropless_fc = 0;
8939 else
8940 bp->dropless_fc = dropless_fc;
8941
Eilon Greenstein8d5726c2009-02-12 08:37:19 +00008942 bp->mrrs = mrrs;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07008943
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008944 bp->tx_ring_size = MAX_TX_AVAIL;
8945 bp->rx_ring_size = MAX_RX_AVAIL;
8946
8947 bp->rx_csum = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008948
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00008949 /* make sure that the numbers are in the right granularity */
8950 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8951 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008952
Eilon Greenstein87942b42009-02-12 08:36:49 +00008953 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8954 bp->current_interval = (poll ? poll : timer_interval);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008955
8956 init_timer(&bp->timer);
8957 bp->timer.expires = jiffies + bp->current_interval;
8958 bp->timer.data = (unsigned long) bp;
8959 bp->timer.function = bnx2x_timer;
8960
8961 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008962}
8963
8964/*
8965 * ethtool service functions
8966 */
8967
8968/* All ethtool functions called with rtnl_lock */
8969
/* ethtool get_settings handler (called under rtnl_lock).
 *
 * Reports the supported/advertised masks from bp->port, the current
 * speed/duplex when the device is open, not function-disabled, and the
 * link is up (-1 otherwise), the media port type derived from the
 * external PHY type, the MDIO PHY address and the autoneg state.
 * In E1H multi-function mode the reported speed is capped by the
 * VN maximum bandwidth from the MF configuration.  Always returns 0.
 */
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if ((bp->state == BNX2X_STATE_OPEN) &&
	    !(bp->flags & MF_FUNC_DIS) &&
	    (bp->link_vars.link_up)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			/* MF max bandwidth is stored in 100 Mbps units */
			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < cmd->speed)
				cmd->speed = vn_max_rate;
		}
	} else {
		/* no link: speed/duplex are unknown */
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		/* map the external PHY type to an ethtool port type */
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	/* interrupt coalescing by packet count is not used */
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
9050
/* ethtool set_settings handler (called under rtnl_lock).
 *
 * Applies a user-requested link configuration: either autonegotiation
 * (advertising mask intersected with what the port supports) or a forced
 * speed/duplex, each validated against bp->port.supported.  On success
 * the new settings are stored in link_params/bp->port and, if the
 * interface is running, the link is reconfigured.
 *
 * Silently ignored (returns 0) in E1H multi-function mode, where the
 * link is shared between functions.  Returns -EINVAL for unsupported
 * combinations.
 */
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		/* 1G and above support full duplex only */
		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	/* apply the new settings to the hardware if the NIC is up */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
9201
/* Test whether a register-dump table entry is valid for E1 / E1H silicon */
#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

/* ethtool get_regs_len: number of bytes the register dump will occupy.
 * Sums the online register ranges for this chip revision plus the
 * wide-bus (paged) register ranges, converts from dwords to bytes and
 * adds the dump header.
 */
static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		/* each wide-bus range is read once plus read_regs_count
		 * extra accesses per element */
		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	/* counts above are in 32-bit words */
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
9236
/* ethtool get_regs: fill *_p with a dump header followed by the raw
 * values of every register range that is online for this chip revision.
 *
 * NOTE(review): bnx2x_get_regs_len() also accounts for the wreg_addrs_*
 * (wide-bus) ranges, but they are not emitted here, so the tail of the
 * buffer stays zeroed from the memset -- confirm this asymmetry is
 * intentional.
 */
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	/* registers can only be read while the device is up */
	if (!netif_running(bp->dev))
		return;

	/* hdr_size is in dwords, excluding the first word */
	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
9276
Eilon Greenstein0d28e492009-08-12 08:23:40 +00009277#define PHY_FW_VER_LEN 10
9278
9279static void bnx2x_get_drvinfo(struct net_device *dev,
9280 struct ethtool_drvinfo *info)
9281{
9282 struct bnx2x *bp = netdev_priv(dev);
9283 u8 phy_fw_ver[PHY_FW_VER_LEN];
9284
9285 strcpy(info->driver, DRV_MODULE_NAME);
9286 strcpy(info->version, DRV_MODULE_VERSION);
9287
9288 phy_fw_ver[0] = '\0';
9289 if (bp->port.pmf) {
9290 bnx2x_acquire_phy_lock(bp);
9291 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9292 (bp->state != BNX2X_STATE_CLOSED),
9293 phy_fw_ver, PHY_FW_VER_LEN);
9294 bnx2x_release_phy_lock(bp);
9295 }
9296
9297 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9298 (bp->common.bc_ver & 0xff0000) >> 16,
9299 (bp->common.bc_ver & 0xff00) >> 8,
9300 (bp->common.bc_ver & 0xff),
9301 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9302 strcpy(info->bus_info, pci_name(bp->pdev));
9303 info->n_stats = BNX2X_NUM_STATS;
9304 info->testinfo_len = BNX2X_NUM_TESTS;
9305 info->eedump_len = bp->common.flash_size;
9306 info->regdump_len = bnx2x_get_regs_len(dev);
9307}
9308
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009309static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9310{
9311 struct bnx2x *bp = netdev_priv(dev);
9312
9313 if (bp->flags & NO_WOL_FLAG) {
9314 wol->supported = 0;
9315 wol->wolopts = 0;
9316 } else {
9317 wol->supported = WAKE_MAGIC;
9318 if (bp->wol)
9319 wol->wolopts = WAKE_MAGIC;
9320 else
9321 wol->wolopts = 0;
9322 }
9323 memset(&wol->sopass, 0, sizeof(wol->sopass));
9324}
9325
9326static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9327{
9328 struct bnx2x *bp = netdev_priv(dev);
9329
9330 if (wol->wolopts & ~WAKE_MAGIC)
9331 return -EINVAL;
9332
9333 if (wol->wolopts & WAKE_MAGIC) {
9334 if (bp->flags & NO_WOL_FLAG)
9335 return -EINVAL;
9336
9337 bp->wol = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009338 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009339 bp->wol = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009340
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009341 return 0;
9342}
9343
9344static u32 bnx2x_get_msglevel(struct net_device *dev)
9345{
9346 struct bnx2x *bp = netdev_priv(dev);
9347
9348 return bp->msglevel;
9349}
9350
9351static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9352{
9353 struct bnx2x *bp = netdev_priv(dev);
9354
9355 if (capable(CAP_NET_ADMIN))
9356 bp->msglevel = level;
9357}
9358
9359static int bnx2x_nway_reset(struct net_device *dev)
9360{
9361 struct bnx2x *bp = netdev_priv(dev);
9362
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009363 if (!bp->port.pmf)
9364 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009365
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009366 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009367 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009368 bnx2x_link_set(bp);
9369 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009370
9371 return 0;
9372}
9373
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009374static u32 bnx2x_get_link(struct net_device *dev)
Naohiro Ooiwa01e53292009-06-30 12:44:19 -07009375{
9376 struct bnx2x *bp = netdev_priv(dev);
9377
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07009378 if (bp->flags & MF_FUNC_DIS)
9379 return 0;
9380
Naohiro Ooiwa01e53292009-06-30 12:44:19 -07009381 return bp->link_vars.link_up;
9382}
9383
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009384static int bnx2x_get_eeprom_len(struct net_device *dev)
9385{
9386 struct bnx2x *bp = netdev_priv(dev);
9387
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009388 return bp->common.flash_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009389}
9390
/* Take the per-port NVRAM software arbitration lock.
 * Requests the lock through MCPR_NVM_SW_ARB and polls until the MCP
 * grants it. Returns 0 on success, -EBUSY if the grant never arrives.
 */
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	/* poll for the grant bit of this port */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
9421
/* Release the per-port NVRAM software arbitration lock taken by
 * bnx2x_acquire_nvram_lock(). Returns 0 on success, -EBUSY if the MCP
 * never confirms the release.
 */
static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	/* poll until the grant bit of this port clears */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
9452
9453static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9454{
9455 u32 val;
9456
9457 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9458
9459 /* enable both bits, even on read */
9460 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9461 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9462 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9463}
9464
9465static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9466{
9467 u32 val;
9468
9469 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9470
9471 /* disable both bits, even after read */
9472 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9473 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9474 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9475}
9476
/* Read one 32-bit word from NVRAM.
 * @offset:    dword-aligned byte offset within the flash
 * @ret_val:   receives the word in big-endian order (ethtool presents
 *             the dump as a byte array)
 * @cmd_flags: MCPR_NVM_COMMAND_FIRST/LAST markers for burst reads
 * Returns 0 on success, -EBUSY if the DONE bit never sets.
 */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
9521
/* Read buf_size bytes of NVRAM into ret_buf.
 * offset and buf_size must be dword aligned and lie within the flash.
 * Takes the NVRAM lock, reads a FIRST..LAST dword burst, then releases
 * the lock. Returns 0 or a negative errno.
 */
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	/* the last (or only) dword carries the LAST marker */
	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9576
9577static int bnx2x_get_eeprom(struct net_device *dev,
9578 struct ethtool_eeprom *eeprom, u8 *eebuf)
9579{
9580 struct bnx2x *bp = netdev_priv(dev);
9581 int rc;
9582
Eilon Greenstein2add3ac2009-01-14 06:44:07 +00009583 if (!netif_running(dev))
9584 return -EAGAIN;
9585
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009586 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009587 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9588 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9589 eeprom->len, eeprom->len);
9590
9591 /* parameters already validated in ethtool_get_eeprom */
9592
9593 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9594
9595 return rc;
9596}
9597
/* Write one 32-bit word (cpu order) to NVRAM at 'offset'.
 * cmd_flags carries the FIRST/LAST burst markers.
 * Returns 0 on success, -EBUSY if the DONE bit never sets.
 */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion; 'val' is reused for the status register */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
9637
/* Bit position (in a dword) of the byte at 'offset' */
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

/* Write a single byte to NVRAM via read-modify-write of the containing
 * aligned dword. Used by ethtool for one-byte updates.
 */
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the whole aligned dword containing the target byte */
	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		/* NOTE(review): the mask/merge below is applied to the
		 * big-endian value returned by the read while BYTE_OFFSET()
		 * is a cpu-order shift, and the result is then converted
		 * with be32_to_cpu() before writing -- the __be32
		 * annotation on 'val' is not kept consistent here; verify
		 * byte placement on big-endian hosts */
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9685
/* Write buf_size bytes from data_buf to NVRAM at 'offset'.
 * A single byte takes the read-modify-write path (bnx2x_nvram_write1);
 * otherwise offset and buf_size must be dword aligned. FIRST/LAST
 * markers are re-issued on NVRAM page boundaries.
 */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* mark the last dword of the buffer or of a flash page */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9746
/* ethtool set_eeprom: either write to NVRAM, or - for the magic 'PHY*'
 * values - drive the external PHY firmware-upgrade sequence.
 */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		/* NOTE(review): 0x53985943 does not actually spell 'PHYC'
		 * (that would be 0x50485943) and lies outside the
		 * 0x504859xx PMF-guard range above -- confirm the constant
		 * is intentional before relying on the guard */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
				XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
9821
9822static int bnx2x_get_coalesce(struct net_device *dev,
9823 struct ethtool_coalesce *coal)
9824{
9825 struct bnx2x *bp = netdev_priv(dev);
9826
9827 memset(coal, 0, sizeof(struct ethtool_coalesce));
9828
9829 coal->rx_coalesce_usecs = bp->rx_ticks;
9830 coal->tx_coalesce_usecs = bp->tx_ticks;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009831
9832 return 0;
9833}
9834
Eilon Greensteinca003922009-08-12 22:53:28 -07009835#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009836static int bnx2x_set_coalesce(struct net_device *dev,
9837 struct ethtool_coalesce *coal)
9838{
9839 struct bnx2x *bp = netdev_priv(dev);
9840
9841 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -07009842 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9843 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009844
9845 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -07009846 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9847 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009848
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009849 if (netif_running(dev))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009850 bnx2x_update_coalesce(bp);
9851
9852 return 0;
9853}
9854
9855static void bnx2x_get_ringparam(struct net_device *dev,
9856 struct ethtool_ringparam *ering)
9857{
9858 struct bnx2x *bp = netdev_priv(dev);
9859
9860 ering->rx_max_pending = MAX_RX_AVAIL;
9861 ering->rx_mini_max_pending = 0;
9862 ering->rx_jumbo_max_pending = 0;
9863
9864 ering->rx_pending = bp->rx_ring_size;
9865 ering->rx_mini_pending = 0;
9866 ering->rx_jumbo_pending = 0;
9867
9868 ering->tx_max_pending = MAX_TX_AVAIL;
9869 ering->tx_pending = bp->tx_ring_size;
9870}
9871
9872static int bnx2x_set_ringparam(struct net_device *dev,
9873 struct ethtool_ringparam *ering)
9874{
9875 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009876 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009877
9878 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9879 (ering->tx_pending > MAX_TX_AVAIL) ||
9880 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9881 return -EINVAL;
9882
9883 bp->rx_ring_size = ering->rx_pending;
9884 bp->tx_ring_size = ering->tx_pending;
9885
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009886 if (netif_running(dev)) {
9887 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9888 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009889 }
9890
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009891 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009892}
9893
9894static void bnx2x_get_pauseparam(struct net_device *dev,
9895 struct ethtool_pauseparam *epause)
9896{
9897 struct bnx2x *bp = netdev_priv(dev);
9898
Eilon Greenstein356e2382009-02-12 08:38:32 +00009899 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9900 BNX2X_FLOW_CTRL_AUTO) &&
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009901 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9902
David S. Millerc0700f92008-12-16 23:53:20 -08009903 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9904 BNX2X_FLOW_CTRL_RX);
9905 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9906 BNX2X_FLOW_CTRL_TX);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009907
9908 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9909 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9910 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9911}
9912
/* ethtool set_pauseparam: program the requested flow-control mode and
 * re-negotiate the link if the interface is up.
 */
static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* in multi-function mode flow control is not per-function */
	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	/* start from AUTO and OR in the requested directions; if neither
	 * direction was requested the result is forced to NONE */
	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		/* flow-control autoneg requires speed autoneg as well */
		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
9956
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009957static int bnx2x_set_flags(struct net_device *dev, u32 data)
9958{
9959 struct bnx2x *bp = netdev_priv(dev);
9960 int changed = 0;
9961 int rc = 0;
9962
9963 /* TPA requires Rx CSUM offloading */
9964 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9965 if (!(dev->features & NETIF_F_LRO)) {
9966 dev->features |= NETIF_F_LRO;
9967 bp->flags |= TPA_ENABLE_FLAG;
9968 changed = 1;
9969 }
9970
9971 } else if (dev->features & NETIF_F_LRO) {
9972 dev->features &= ~NETIF_F_LRO;
9973 bp->flags &= ~TPA_ENABLE_FLAG;
9974 changed = 1;
9975 }
9976
9977 if (changed && netif_running(dev)) {
9978 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9979 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9980 }
9981
9982 return rc;
9983}
9984
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009985static u32 bnx2x_get_rx_csum(struct net_device *dev)
9986{
9987 struct bnx2x *bp = netdev_priv(dev);
9988
9989 return bp->rx_csum;
9990}
9991
9992static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9993{
9994 struct bnx2x *bp = netdev_priv(dev);
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009995 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009996
9997 bp->rx_csum = data;
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009998
9999 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
10000 TPA'ed packets will be discarded due to wrong TCP CSUM */
10001 if (!data) {
10002 u32 flags = ethtool_op_get_flags(dev);
10003
10004 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10005 }
10006
10007 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010008}
10009
10010static int bnx2x_set_tso(struct net_device *dev, u32 data)
10011{
Eilon Greenstein755735e2008-06-23 20:35:13 -070010012 if (data) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010013 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735e2008-06-23 20:35:13 -070010014 dev->features |= NETIF_F_TSO6;
10015 } else {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010016 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735e2008-06-23 20:35:13 -070010017 dev->features &= ~NETIF_F_TSO6;
10018 }
10019
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010020 return 0;
10021}
10022
/* Human-readable names of the ethtool self-tests; the order must match
 * the test indices used by the self-test implementation */
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
10034
/* Offline self-test: verify that selected per-port registers are writable.
 * Each reg_tbl entry is probed at (offset0 + port*offset1); only the bits
 * in 'mask' are expected to read back.  The original register value is
 * restored after every probe.  The whole table is walked twice, writing
 * all-zeros and then all-ones.
 *
 * Returns 0 on success, -ENODEV on a readback mismatch or if the
 * interface is down.
 */
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	/* { register, per-port stride, writable-bits mask };
	   terminated by an offset0 of 0xffffffff */
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
10127
/* Offline self-test: read every word of a set of internal memories, then
 * check the parity-status registers for latched errors.  Bits listed in
 * e1_mask (E1 chips) or e1h_mask (E1H chips) are expected/ignored; any
 * other set bit fails the test.
 *
 * Returns 0 on success, -ENODEV on a parity indication or if the
 * interface is down.
 */
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	/* memories to sweep; terminated by offset 0xffffffff */
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	/* parity-status registers with per-chip-revision ignore masks */
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
10186
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010187static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10188{
10189 int cnt = 1000;
10190
10191 if (link_up)
10192 while (bnx2x_link_test(bp) && cnt--)
10193 msleep(10);
10194}
10195
/* Offline loopback test: build one packet by hand, post it directly on
 * queue 0's TX BD ring, ring the doorbell, then verify that exactly one
 * packet comes back on queue 0's RX ring with the expected length and
 * payload pattern.
 *
 * @loopback_mode: BNX2X_PHY_LOOPBACK (requires link_params already set
 *		   to LOOPBACK_XGXS_10) or BNX2X_MAC_LOOPBACK (BMAC
 *		   loopback is configured here via bnx2x_phy_init()).
 * @link_up: unused in this function's visible body — passed by the
 *	     caller alongside the mode (kept for interface symmetry).
 *
 * Returns 0 on success, -EINVAL for a bad/unprepared mode, -ENOMEM on
 * skb allocation failure, -ENODEV when the packet is not seen back.
 * NOTE(review): caller must have the device in DIAG state with NAPI
 * stopped — this path touches the rings without locking.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet: dst MAC = own addr, src MAC = 0,
	   rest of the header 0x77, payload = (i & 0xff) ramp */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	/* snapshot consumer indices so completion can be detected below */
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	/* fill the start BD with the DMA-mapped buffer */
	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	/* make sure the BDs are written before ringing the doorbell */
	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	/* give the chip time to loop the packet back */
	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* inspect the completion: must be a fast-path CQE with no errors */
	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* verify the payload ramp survived the round trip */
	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* consume the RX entry we just examined */
	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
10330
10331static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10332{
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010333 int rc = 0, res;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010334
10335 if (!netif_running(bp->dev))
10336 return BNX2X_LOOPBACK_FAILED;
10337
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070010338 bnx2x_netif_stop(bp, 1);
Eilon Greenstein3910c8a2009-01-22 06:01:32 +000010339 bnx2x_acquire_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010340
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010341 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10342 if (res) {
10343 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10344 rc |= BNX2X_PHY_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010345 }
10346
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010347 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10348 if (res) {
10349 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10350 rc |= BNX2X_MAC_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010351 }
10352
Eilon Greenstein3910c8a2009-01-22 06:01:32 +000010353 bnx2x_release_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010354 bnx2x_netif_start(bp);
10355
10356 return rc;
10357}
10358
/* Standard CRC-32 residue: value left in the CRC register after running
 * the CRC over a block that includes its own trailing CRC. */
#define CRC32_RESIDUAL 0xdebb20e3
10360
10361static int bnx2x_test_nvram(struct bnx2x *bp)
10362{
10363 static const struct {
10364 int offset;
10365 int size;
10366 } nvram_tbl[] = {
10367 { 0, 0x14 }, /* bootstrap */
10368 { 0x14, 0xec }, /* dir */
10369 { 0x100, 0x350 }, /* manuf_info */
10370 { 0x450, 0xf0 }, /* feature_info */
10371 { 0x640, 0x64 }, /* upgrade_key_info */
10372 { 0x6a4, 0x64 },
10373 { 0x708, 0x70 }, /* manuf_key_info */
10374 { 0x778, 0x70 },
10375 { 0, 0 }
10376 };
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010377 __be32 buf[0x350 / 4];
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010378 u8 *data = (u8 *)buf;
10379 int i, rc;
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000010380 u32 magic, crc;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010381
10382 rc = bnx2x_nvram_read(bp, 0, data, 4);
10383 if (rc) {
Eilon Greensteinf5372252009-02-12 08:38:30 +000010384 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010385 goto test_nvram_exit;
10386 }
10387
10388 magic = be32_to_cpu(buf[0]);
10389 if (magic != 0x669955aa) {
10390 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10391 rc = -ENODEV;
10392 goto test_nvram_exit;
10393 }
10394
10395 for (i = 0; nvram_tbl[i].size; i++) {
10396
10397 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10398 nvram_tbl[i].size);
10399 if (rc) {
10400 DP(NETIF_MSG_PROBE,
Eilon Greensteinf5372252009-02-12 08:38:30 +000010401 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010402 goto test_nvram_exit;
10403 }
10404
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000010405 crc = ether_crc_le(nvram_tbl[i].size, data);
10406 if (crc != CRC32_RESIDUAL) {
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010407 DP(NETIF_MSG_PROBE,
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000010408 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010409 rc = -ENODEV;
10410 goto test_nvram_exit;
10411 }
10412 }
10413
10414test_nvram_exit:
10415 return rc;
10416}
10417
/* Online interrupt self-test: post an (empty) SET_MAC ramrod on the
 * slowpath and wait for its completion, which arrives via the slowpath
 * interrupt — completion clears bp->set_mac_pending (in the sp event
 * handler elsewhere in this file — NOTE(review): confirmed only by the
 * polling below, not visible here).
 *
 * Returns 0 when the completion is seen within ~100ms, -ENODEV on
 * timeout or if the interface is down, or the bnx2x_sp_post() error.
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* zero-length command: exercises the ramrod path without actually
	   changing any MAC configuration */
	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* mark pending before posting; paired with the completion side */
	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		/* poll up to 10 x 10ms for the completion to clear the flag */
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
10452
/* ethtool .self_test handler.  Fills buf[] (one u64 per entry of
 * bnx2x_tests_str_arr) with per-test results (non-zero = failed) and
 * sets ETH_TEST_FL_FAILED on any failure.
 *
 * Offline tests (registers/memory/loopback) require reloading the NIC
 * in DIAG mode and are skipped in E1H multi-function mode.  Online
 * tests (nvram/interrupt/link) run against the live device; the link
 * test only runs on the port-management function (pmf).
 */
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		/* remember link state, then reload the NIC in DIAG mode */
		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* return to normal operation */
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
10522
/* Per-queue statistics descriptors: offset into eth_q_stats (32-bit
 * words), counter width in bytes (4 or 8), and an ethtool string with a
 * %d placeholder for the queue index. */
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};
10548
/* Device/port statistics descriptors: offset into eth_stats (32-bit
 * words), counter width in bytes, a scope flag (port-wide, per-function,
 * or both) and the ethtool string. */
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
10642
/* Classify bnx2x_stats_arr entries: port-only vs per-function counters,
 * and whether E1H multi-function mode should hide port statistics
 * (unless the BNX2X_MSG_STATS debug flag overrides). */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
Yitchak Gertner66e855f2008-08-13 15:49:05 -070010648
Ben Hutchings15f0a392009-10-01 11:58:24 +000010649static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10650{
10651 struct bnx2x *bp = netdev_priv(dev);
10652 int i, num_stats;
10653
10654 switch(stringset) {
10655 case ETH_SS_STATS:
10656 if (is_multi(bp)) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010657 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
Ben Hutchings15f0a392009-10-01 11:58:24 +000010658 if (!IS_E1HMF_MODE_STAT(bp))
10659 num_stats += BNX2X_NUM_STATS;
10660 } else {
10661 if (IS_E1HMF_MODE_STAT(bp)) {
10662 num_stats = 0;
10663 for (i = 0; i < BNX2X_NUM_STATS; i++)
10664 if (IS_FUNC_STAT(i))
10665 num_stats++;
10666 } else
10667 num_stats = BNX2X_NUM_STATS;
10668 }
10669 return num_stats;
10670
10671 case ETH_SS_TEST:
10672 return BNX2X_NUM_TESTS;
10673
10674 default:
10675 return -EINVAL;
10676 }
10677}
10678
/* ethtool .get_strings handler: emit the string table corresponding to
 * bnx2x_get_sset_count().  For ETH_SS_STATS the per-queue strings are
 * produced via sprintf with the queue index substituted into the "[%d]"
 * placeholder; selection/ordering must mirror bnx2x_get_ethtool_stats().
 */
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			/* per-queue strings first, one group per queue */
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			/* then the device-wide strings */
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				/* MF mode hides port-wide counters */
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}
10715
/* ethtool .get_ethtool_stats handler: marshal the raw counter blocks
 * into the u64 buf[] in the exact order advertised by
 * bnx2x_get_strings().  Each descriptor gives a 32-bit-word offset and
 * a size: 0 = placeholder (report 0), 4 = single word, 8 = hi/lo pair
 * combined via HILO_U64.
 */
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		/* per-queue counters, one group per queue */
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		/* then the device-wide counters */
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			/* MF mode hides port-wide counters */
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
10787
10788static int bnx2x_phys_id(struct net_device *dev, u32 data)
10789{
10790 struct bnx2x *bp = netdev_priv(dev);
10791 int i;
10792
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010793 if (!netif_running(dev))
10794 return 0;
10795
10796 if (!bp->port.pmf)
10797 return 0;
10798
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010799 if (data == 0)
10800 data = 2;
10801
10802 for (i = 0; i < (data * 2); i++) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010803 if ((i % 2) == 0)
Yaniv Rosner7846e472009-11-05 19:18:07 +020010804 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10805 SPEED_1000);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010806 else
Yaniv Rosner7846e472009-11-05 19:18:07 +020010807 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010808
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010809 msleep_interruptible(500);
10810 if (signal_pending(current))
10811 break;
10812 }
10813
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010814 if (bp->link_vars.link_up)
Yaniv Rosner7846e472009-11-05 19:18:07 +020010815 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10816 bp->link_vars.line_speed);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010817
10818 return 0;
10819}
10820
Stephen Hemminger0fc0b732009-09-02 01:03:33 -070010821static const struct ethtool_ops bnx2x_ethtool_ops = {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010822 .get_settings = bnx2x_get_settings,
10823 .set_settings = bnx2x_set_settings,
10824 .get_drvinfo = bnx2x_get_drvinfo,
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000010825 .get_regs_len = bnx2x_get_regs_len,
10826 .get_regs = bnx2x_get_regs,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010827 .get_wol = bnx2x_get_wol,
10828 .set_wol = bnx2x_set_wol,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010829 .get_msglevel = bnx2x_get_msglevel,
10830 .set_msglevel = bnx2x_set_msglevel,
10831 .nway_reset = bnx2x_nway_reset,
Naohiro Ooiwa01e53292009-06-30 12:44:19 -070010832 .get_link = bnx2x_get_link,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010833 .get_eeprom_len = bnx2x_get_eeprom_len,
10834 .get_eeprom = bnx2x_get_eeprom,
10835 .set_eeprom = bnx2x_set_eeprom,
10836 .get_coalesce = bnx2x_get_coalesce,
10837 .set_coalesce = bnx2x_set_coalesce,
10838 .get_ringparam = bnx2x_get_ringparam,
10839 .set_ringparam = bnx2x_set_ringparam,
10840 .get_pauseparam = bnx2x_get_pauseparam,
10841 .set_pauseparam = bnx2x_set_pauseparam,
10842 .get_rx_csum = bnx2x_get_rx_csum,
10843 .set_rx_csum = bnx2x_set_rx_csum,
10844 .get_tx_csum = ethtool_op_get_tx_csum,
Eilon Greenstein755735e2008-06-23 20:35:13 -070010845 .set_tx_csum = ethtool_op_set_tx_hw_csum,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010846 .set_flags = bnx2x_set_flags,
10847 .get_flags = ethtool_op_get_flags,
10848 .get_sg = ethtool_op_get_sg,
10849 .set_sg = ethtool_op_set_sg,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010850 .get_tso = ethtool_op_get_tso,
10851 .set_tso = bnx2x_set_tso,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010852 .self_test = bnx2x_self_test,
Ben Hutchings15f0a392009-10-01 11:58:24 +000010853 .get_sset_count = bnx2x_get_sset_count,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010854 .get_strings = bnx2x_get_strings,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010855 .phys_id = bnx2x_phys_id,
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010856 .get_ethtool_stats = bnx2x_get_ethtool_stats,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010857};
10858
10859/* end of ethtool_ops */
10860
10861/****************************************************************************
10862* General service functions
10863****************************************************************************/
10864
10865static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10866{
10867 u16 pmcsr;
10868
10869 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10870
10871 switch (state) {
10872 case PCI_D0:
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010873 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010874 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10875 PCI_PM_CTRL_PME_STATUS));
10876
10877 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
Eilon Greenstein33471622008-08-13 15:59:08 -070010878 /* delay required during transition out of D3hot */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010879 msleep(20);
10880 break;
10881
10882 case PCI_D3hot:
10883 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10884 pmcsr |= 3;
10885
10886 if (bp->wol)
10887 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10888
10889 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10890 pmcsr);
10891
10892 /* No more memory access after this point until
10893 * device is brought back to D0.
10894 */
10895 break;
10896
10897 default:
10898 return -EINVAL;
10899 }
10900 return 0;
10901}
10902
Eilon Greenstein237907c2009-01-14 06:42:44 +000010903static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10904{
10905 u16 rx_cons_sb;
10906
10907 /* Tell compiler that status block fields can change */
10908 barrier();
10909 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10910 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10911 rx_cons_sb++;
10912 return (fp->rx_comp_cons != rx_cons_sb);
10913}
10914
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010915/*
10916 * net_device service functions
10917 */
10918
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010919static int bnx2x_poll(struct napi_struct *napi, int budget)
10920{
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010921 int work_done = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010922 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10923 napi);
10924 struct bnx2x *bp = fp->bp;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010925
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010926 while (1) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010927#ifdef BNX2X_STOP_ON_ERROR
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010928 if (unlikely(bp->panic)) {
10929 napi_complete(napi);
10930 return 0;
10931 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010932#endif
10933
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010934 if (bnx2x_has_tx_work(fp))
10935 bnx2x_tx_int(fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010936
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010937 if (bnx2x_has_rx_work(fp)) {
10938 work_done += bnx2x_rx_int(fp, budget - work_done);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010939
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010940 /* must not complete if we consumed full budget */
10941 if (work_done >= budget)
10942 break;
10943 }
Eilon Greenstein356e2382009-02-12 08:38:32 +000010944
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010945 /* Fall out from the NAPI loop if needed */
10946 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10947 bnx2x_update_fpsb_idx(fp);
10948 /* bnx2x_has_rx_work() reads the status block, thus we need
10949 * to ensure that status block indices have been actually read
10950 * (bnx2x_update_fpsb_idx) prior to this check
10951 * (bnx2x_has_rx_work) so that we won't write the "newer"
10952 * value of the status block to IGU (if there was a DMA right
10953 * after bnx2x_has_rx_work and if there is no rmb, the memory
10954 * reading (bnx2x_update_fpsb_idx) may be postponed to right
10955 * before bnx2x_ack_sb). In this case there will never be
10956 * another interrupt until there is another update of the
10957 * status block, while there is still unhandled work.
10958 */
10959 rmb();
10960
10961 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10962 napi_complete(napi);
10963 /* Re-enable interrupts */
10964 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10965 le16_to_cpu(fp->fp_c_idx),
10966 IGU_INT_NOP, 1);
10967 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10968 le16_to_cpu(fp->fp_u_idx),
10969 IGU_INT_ENABLE, 1);
10970 break;
10971 }
10972 }
Eilon Greenstein8534f322009-03-02 07:59:45 +000010973 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010974
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010975 return work_done;
10976}
10977
Eilon Greenstein755735e2008-06-23 20:35:13 -070010978
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 *
 * The original start BD is shrunk to cover only 'hlen' header bytes and
 * its nbd count is updated; a new data BD is allocated right after the
 * parsing BD, pointing 'hlen' bytes into the same DMA mapping and
 * covering the remainder.  *tx_bd is advanced to the new data BD and the
 * new BD producer index is returned.
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	/* remember the full length before truncating to the header part */
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	/* reuse the header BD's mapping, offset past the headers */
	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
11028
11029static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11030{
11031 if (fix > 0)
11032 csum = (u16) ~csum_fold(csum_sub(csum,
11033 csum_partial(t_header - fix, fix, 0)));
11034
11035 else if (fix < 0)
11036 csum = (u16) ~csum_fold(csum_add(csum,
11037 csum_partial(t_header, -fix, 0)));
11038
11039 return swab16(csum);
11040}
11041
11042static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11043{
11044 u32 rc;
11045
11046 if (skb->ip_summed != CHECKSUM_PARTIAL)
11047 rc = XMIT_PLAIN;
11048
11049 else {
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000011050 if (skb->protocol == htons(ETH_P_IPV6)) {
Eilon Greenstein755735e2008-06-23 20:35:13 -070011051 rc = XMIT_CSUM_V6;
11052 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11053 rc |= XMIT_CSUM_TCP;
11054
11055 } else {
11056 rc = XMIT_CSUM_V4;
11057 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11058 rc |= XMIT_CSUM_TCP;
11059 }
11060 }
11061
11062 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
Eilon Greensteind6a2f982009-11-09 06:09:22 +000011063 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011064
11065 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
Eilon Greensteind6a2f982009-11-09 06:09:22 +000011066 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011067
11068 return rc;
11069}
11070
Eilon Greenstein632da4d2009-01-14 06:44:10 +000011071#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
Eilon Greensteinf5372252009-02-12 08:38:30 +000011072/* check if packet requires linearization (packet is too fragmented)
11073 no need to check fragmentation if page size > 8K (there will be no
11074 violation to FW restrictions) */
Eilon Greenstein755735e2008-06-23 20:35:13 -070011075static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11076 u32 xmit_type)
11077{
11078 int to_copy = 0;
11079 int hlen = 0;
11080 int first_bd_sz = 0;
11081
11082 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11083 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11084
11085 if (xmit_type & XMIT_GSO) {
11086 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11087 /* Check if LSO packet needs to be copied:
11088 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11089 int wnd_size = MAX_FETCH_BD - 3;
Eilon Greenstein33471622008-08-13 15:59:08 -070011090 /* Number of windows to check */
Eilon Greenstein755735e2008-06-23 20:35:13 -070011091 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11092 int wnd_idx = 0;
11093 int frag_idx = 0;
11094 u32 wnd_sum = 0;
11095
11096 /* Headers length */
11097 hlen = (int)(skb_transport_header(skb) - skb->data) +
11098 tcp_hdrlen(skb);
11099
11100 /* Amount of data (w/o headers) on linear part of SKB*/
11101 first_bd_sz = skb_headlen(skb) - hlen;
11102
11103 wnd_sum = first_bd_sz;
11104
11105 /* Calculate the first sum - it's special */
11106 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11107 wnd_sum +=
11108 skb_shinfo(skb)->frags[frag_idx].size;
11109
11110 /* If there was data on linear skb data - check it */
11111 if (first_bd_sz > 0) {
11112 if (unlikely(wnd_sum < lso_mss)) {
11113 to_copy = 1;
11114 goto exit_lbl;
11115 }
11116
11117 wnd_sum -= first_bd_sz;
11118 }
11119
11120 /* Others are easier: run through the frag list and
11121 check all windows */
11122 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11123 wnd_sum +=
11124 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11125
11126 if (unlikely(wnd_sum < lso_mss)) {
11127 to_copy = 1;
11128 break;
11129 }
11130 wnd_sum -=
11131 skb_shinfo(skb)->frags[wnd_idx].size;
11132 }
Eilon Greenstein755735e2008-06-23 20:35:13 -070011133 } else {
11134 /* in non-LSO too fragmented packet should always
11135 be linearized */
11136 to_copy = 1;
11137 }
11138 }
11139
11140exit_lbl:
11141 if (unlikely(to_copy))
11142 DP(NETIF_MSG_TX_QUEUED,
11143 "Linearization IS REQUIRED for %s packet. "
11144 "num_frags %d hlen %d first_bd_sz %d\n",
11145 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11146 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11147
11148 return to_copy;
11149}
Eilon Greenstein632da4d2009-01-14 06:44:10 +000011150#endif
Eilon Greenstein755735e2008-06-23 20:35:13 -070011151
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
/* ndo_start_xmit: build the BD chain for one skb on the TX queue chosen
 * by skb_get_queue_mapping() and ring the doorbell.  The chain layout is
 * start BD, parsing BD (checksum/TSO metadata), then one BD per fragment;
 * a TSO header/data split may insert one extra BD.  Returns NETDEV_TX_OK,
 * or NETDEV_TX_BUSY when the ring is unexpectedly full.
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	/* Worst case: one BD per fragment + start BD, parsing BD and a
	 * possible split BD.  The stack should never send while the queue
	 * is stopped, hence the BUG message. */
	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	/* Hardware VLAN tag insertion when acceleration is enabled */
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		/* header lengths in the parsing BD are in 16-bit words */
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		/* back to bytes for the TSO split check below */
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		/* FW requires headers and data in separate BDs; split when
		 * the linear part holds more than just headers */
		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* one BD per page fragment; the first data BD also receives the
	 * total packet size at the end */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp->eth_q_stats.driver_xoff++;
		/* Re-check: bnx2x_tx_int may have freed BDs meanwhile */
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
11429
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011430/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011431static int bnx2x_open(struct net_device *dev)
11432{
11433 struct bnx2x *bp = netdev_priv(dev);
11434
Eilon Greenstein6eccabb2009-01-22 03:37:48 +000011435 netif_carrier_off(dev);
11436
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011437 bnx2x_set_power_state(bp, PCI_D0);
11438
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011439 return bnx2x_nic_load(bp, LOAD_OPEN);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011440}
11441
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011442/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011443static int bnx2x_close(struct net_device *dev)
11444{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011445 struct bnx2x *bp = netdev_priv(dev);
11446
11447 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011448 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11449 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11450 if (!CHIP_REV_IS_SLOW(bp))
11451 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011452
11453 return 0;
11454}
11455
/* called with netif_tx_lock from dev_mcast.c */
/* Program the chip's RX filtering (promisc / all-multi / multicast list)
 * to match dev->flags and the device multicast list. E1 programs discrete
 * CAM entries via a SET_MAC ramrod; E1H uses a 256-bit hash filter.
 */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* Nothing can be programmed until the chip is fully up */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				/* CAM entries hold the MAC as three
				 * byte-swapped 16-bit words */
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			/* Invalidate leftover entries if the previous list
			 * was longer than the new one */
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			/* Flag the pending MAC update before posting the
			 * ramrod; the barrier orders the flag write against
			 * the slowpath post */
			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			/* 4 == sizeof(u32): clear the whole hash table */
			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				/* Hash bit index is the top byte of the
				 * CRC32c of the MAC address */
				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
11579
11580/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011581static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11582{
11583 struct sockaddr *addr = p;
11584 struct bnx2x *bp = netdev_priv(dev);
11585
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011586 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011587 return -EINVAL;
11588
11589 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011590 if (netif_running(dev)) {
11591 if (CHIP_IS_E1(bp))
Michael Chane665bfd2009-10-10 13:46:54 +000011592 bnx2x_set_eth_mac_addr_e1(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011593 else
Michael Chane665bfd2009-10-10 13:46:54 +000011594 bnx2x_set_eth_mac_addr_e1h(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011595 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011596
11597 return 0;
11598}
11599
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011600/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011601static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11602 int devad, u16 addr)
11603{
11604 struct bnx2x *bp = netdev_priv(netdev);
11605 u16 value;
11606 int rc;
11607 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11608
11609 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11610 prtad, devad, addr);
11611
11612 if (prtad != bp->mdio.prtad) {
11613 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11614 prtad, bp->mdio.prtad);
11615 return -EINVAL;
11616 }
11617
11618 /* The HW expects different devad if CL22 is used */
11619 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11620
11621 bnx2x_acquire_phy_lock(bp);
11622 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11623 devad, addr, &value);
11624 bnx2x_release_phy_lock(bp);
11625 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11626
11627 if (!rc)
11628 rc = value;
11629 return rc;
11630}
11631
11632/* called with rtnl_lock */
11633static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11634 u16 addr, u16 value)
11635{
11636 struct bnx2x *bp = netdev_priv(netdev);
11637 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11638 int rc;
11639
11640 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11641 " value 0x%x\n", prtad, devad, addr, value);
11642
11643 if (prtad != bp->mdio.prtad) {
11644 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11645 prtad, bp->mdio.prtad);
11646 return -EINVAL;
11647 }
11648
11649 /* The HW expects different devad if CL22 is used */
11650 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11651
11652 bnx2x_acquire_phy_lock(bp);
11653 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11654 devad, addr, value);
11655 bnx2x_release_phy_lock(bp);
11656 return rc;
11657}
11658
11659/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011660static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11661{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011662 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011663 struct mii_ioctl_data *mdio = if_mii(ifr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011664
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011665 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11666 mdio->phy_id, mdio->reg_num, mdio->val_in);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011667
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011668 if (!netif_running(dev))
11669 return -EAGAIN;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011670
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011671 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011672}
11673
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011674/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011675static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11676{
11677 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011678 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011679
11680 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11681 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11682 return -EINVAL;
11683
11684 /* This does not race with packet allocation
Eliezer Tamirc14423f2008-02-28 11:49:42 -080011685 * because the actual alloc size is
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011686 * only updated as part of load
11687 */
11688 dev->mtu = new_mtu;
11689
11690 if (netif_running(dev)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011691 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11692 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011693 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011694
11695 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011696}
11697
11698static void bnx2x_tx_timeout(struct net_device *dev)
11699{
11700 struct bnx2x *bp = netdev_priv(dev);
11701
11702#ifdef BNX2X_STOP_ON_ERROR
11703 if (!bp->panic)
11704 bnx2x_panic();
11705#endif
11706 /* This allows the netif to be shutdown gracefully before resetting */
11707 schedule_work(&bp->reset_task);
11708}
11709
11710#ifdef BCM_VLAN
/* called with rtnl_lock */
/* VLAN group registration callback: records the group pointer and
 * refreshes the HW VLAN stripping/insertion flags from dev->features,
 * then re-programs the client configuration if the NIC is up.
 */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* vlgrp may be NULL when the group is being unregistered */
	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	/* Push the new VLAN configuration to the chip only when running */
	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011731
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011732#endif
11733
11734#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11735static void poll_bnx2x(struct net_device *dev)
11736{
11737 struct bnx2x *bp = netdev_priv(dev);
11738
11739 disable_irq(bp->pdev->irq);
11740 bnx2x_interrupt(bp->pdev->irq, dev);
11741 enable_irq(bp->pdev->irq);
11742}
11743#endif
11744
/* net_device callback table; the stack serializes most of these under
 * rtnl_lock (see the per-function comments above each handler) */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
11762
/* One-time PCI/netdev setup at probe time: enables the device, claims and
 * maps BAR0 (registers) and BAR2 (doorbells), configures DMA masks, and
 * fills in the net_device ops/feature flags and mdio bookkeeping.
 * Returns 0 on success or a negative errno; on failure everything acquired
 * so far is unwound via the goto ladder at the bottom.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	/* PCI function number distinguishes ports on multi-function chips */
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 (registers) and BAR2 (doorbells) must both be MMIO */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* Only the first function to enable the device claims the regions */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* Prefer 64-bit DMA; fall back to 32-bit, else fail */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Map at most BNX2X_DB_SIZE of the doorbell BAR */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	/* vlan_features mirror dev->features so offloads work over VLANs */
	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
11926
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011927static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11928 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -080011929{
11930 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11931
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011932 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11933
11934 /* return value of 1=2.5GHz 2=5GHz */
11935 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -080011936}
11937
/* Validate the layout and version of the loaded firmware blob before any
 * of its sections are consumed: section bounds, init-ops offset table
 * bounds and the FW version stamp. Returns 0 on success, -EINVAL on any
 * inconsistency.
 */
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	/* The header itself is an array of section descriptors */
	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of "
			       "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	/* NOTE(review): '>' allows an offset equal to num_ops, which would
	 * index one past the last op; confirm whether '>=' is intended */
	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of "
			       "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
11998
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000011999static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012000{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012001 const __be32 *source = (const __be32 *)_source;
12002 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012003 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012004
12005 for (i = 0; i < n/4; i++)
12006 target[i] = be32_to_cpu(source[i]);
12007}
12008
12009/*
12010 Ops array is stored in the following format:
12011 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12012 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012013static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012014{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012015 const __be32 *source = (const __be32 *)_source;
12016 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012017 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012018
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012019 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012020 tmp = be32_to_cpu(source[j]);
12021 target[i].op = (tmp >> 24) & 0xff;
12022 target[i].offset = tmp & 0xffffff;
12023 target[i].raw_data = be32_to_cpu(source[j+1]);
12024 }
12025}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012026
12027static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012028{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012029 const __be16 *source = (const __be16 *)_source;
12030 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012031 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012032
12033 for (i = 0; i < n/2; i++)
12034 target[i] = be16_to_cpu(source[i]);
12035}
12036
/*
 * Allocate bp->arr and fill it with the named firmware section, converted
 * to host byte order by 'func'. Jumps to 'lbl' on allocation failure.
 * Relies on 'bp' and 'fw_hdr' being in scope at the expansion site
 * (see bnx2x_init_firmware()).
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes " \
			       "for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)
12049
/* Load and validate the chip-specific firmware file, then build the
 * host-order init_data/init_ops/init_ops_offsets arrays and point the
 * per-STORM INIT_* accessors at their sections inside the raw blob.
 * Returns 0 on success or a negative errno; on failure all arrays
 * allocated so far and the firmware itself are released via the label
 * ladder at the bottom.
 */
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	/* E1 and E1H chips take different firmware images */
	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else
		fw_file_name = FW_FILE_NAME_E1H;

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	/* Sanity-check section bounds and the FW version stamp */
	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
12118
12119
/* PCI probe entry point: allocates the multiqueue netdev, performs the
 * PCI/BAR setup (bnx2x_init_dev), driver-private init (bnx2x_init_bp),
 * firmware load and netdev registration, then logs the discovered board.
 * Returns 0 on success or a negative errno with all resources unwound.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		/* bnx2x_init_dev() unwound its own state */
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	/* Board identification banner */
	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
12190
/* bnx2x_remove_one - PCI device removal callback.
 *
 * Undoes bnx2x_init_one() in reverse order: unregister from the net
 * core first (so no new driver entry points can run), then free the
 * firmware init arrays, unmap the BARs and release the PCI resources.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* free the init arrays and firmware blob obtained via
	 * bnx2x_init_firmware() at probe time */
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* release BAR regions only when we hold the last enable
	 * reference on this device */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
12223
/* bnx2x_suspend - PCI power-management suspend callback.
 *
 * Saves PCI config space and, if the interface is up, detaches it from
 * the stack, unloads the NIC and enters the PM state requested by the
 * core.  bnx2x_resume() reverses this with a LOAD_OPEN reload.
 */
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	/* serialize against concurrent open/close/ethtool operations */
	rtnl_lock();

	pci_save_state(pdev);

	/* interface is down - saving config space is all that's needed */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
12254
/* bnx2x_resume - PCI power-management resume callback.
 *
 * Restores PCI config space and, if the interface was up at suspend
 * time, powers the device back to D0, re-attaches it to the stack and
 * reloads the NIC.  Returns 0 or the bnx2x_nic_load() error code.
 */
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	/* interface was down at suspend - nothing to reload */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
12285
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012286static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12287{
12288 int i;
12289
12290 bp->state = BNX2X_STATE_ERROR;
12291
12292 bp->rx_mode = BNX2X_RX_MODE_NONE;
12293
12294 bnx2x_netif_stop(bp, 0);
12295
12296 del_timer_sync(&bp->timer);
12297 bp->stats_state = STATS_STATE_DISABLED;
12298 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12299
12300 /* Release IRQs */
12301 bnx2x_free_irq(bp);
12302
12303 if (CHIP_IS_E1(bp)) {
12304 struct mac_configuration_cmd *config =
12305 bnx2x_sp(bp, mcast_config);
12306
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -080012307 for (i = 0; i < config->hdr.length; i++)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012308 CAM_INVALIDATE(config->config_table[i]);
12309 }
12310
12311 /* Free SKBs, SGEs, TPA pool and driver internals */
12312 bnx2x_free_skbs(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000012313 for_each_queue(bp, i)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012314 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000012315 for_each_queue(bp, i)
Eilon Greenstein7cde1c82009-01-22 06:01:25 +000012316 netif_napi_del(&bnx2x_fp(bp, i, napi));
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012317 bnx2x_free_mem(bp);
12318
12319 bp->state = BNX2X_STATE_CLOSED;
12320
12321 netif_carrier_off(bp->dev);
12322
12323 return 0;
12324}
12325
/* bnx2x_eeh_recover - re-establish contact with the MCP after a PCI
 * error recovery reset (called from bnx2x_io_resume()).
 *
 * Re-reads the shared-memory base from the chip, validates the MCP
 * signature and resyncs the driver/MCP mailbox sequence number.  Sets
 * NO_MCP_FLAG when the shared-memory base looks invalid.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* shmem base outside [0xA0000, 0xC0000) means no running MCP */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		/* resync the firmware mailbox sequence number */
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
12355
Wendy Xiong493adb12008-06-23 20:36:22 -070012356/**
12357 * bnx2x_io_error_detected - called when PCI error is detected
12358 * @pdev: Pointer to PCI device
12359 * @state: The current pci connection state
12360 *
12361 * This function is called after a PCI bus error affecting
12362 * this device has been detected.
12363 */
12364static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12365 pci_channel_state_t state)
12366{
12367 struct net_device *dev = pci_get_drvdata(pdev);
12368 struct bnx2x *bp = netdev_priv(dev);
12369
12370 rtnl_lock();
12371
12372 netif_device_detach(dev);
12373
Dean Nelson07ce50e2009-07-31 09:13:25 +000012374 if (state == pci_channel_io_perm_failure) {
12375 rtnl_unlock();
12376 return PCI_ERS_RESULT_DISCONNECT;
12377 }
12378
Wendy Xiong493adb12008-06-23 20:36:22 -070012379 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012380 bnx2x_eeh_nic_unload(bp);
Wendy Xiong493adb12008-06-23 20:36:22 -070012381
12382 pci_disable_device(pdev);
12383
12384 rtnl_unlock();
12385
12386 /* Request a slot reset */
12387 return PCI_ERS_RESULT_NEED_RESET;
12388}
12389
12390/**
12391 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12392 * @pdev: Pointer to PCI device
12393 *
12394 * Restart the card from scratch, as if from a cold-boot.
12395 */
12396static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12397{
12398 struct net_device *dev = pci_get_drvdata(pdev);
12399 struct bnx2x *bp = netdev_priv(dev);
12400
12401 rtnl_lock();
12402
12403 if (pci_enable_device(pdev)) {
12404 dev_err(&pdev->dev,
12405 "Cannot re-enable PCI device after reset\n");
12406 rtnl_unlock();
12407 return PCI_ERS_RESULT_DISCONNECT;
12408 }
12409
12410 pci_set_master(pdev);
12411 pci_restore_state(pdev);
12412
12413 if (netif_running(dev))
12414 bnx2x_set_power_state(bp, PCI_D0);
12415
12416 rtnl_unlock();
12417
12418 return PCI_ERS_RESULT_RECOVERED;
12419}
12420
12421/**
12422 * bnx2x_io_resume - called when traffic can start flowing again
12423 * @pdev: Pointer to PCI device
12424 *
12425 * This callback is called when the error recovery driver tells us that
12426 * its OK to resume normal operation.
12427 */
12428static void bnx2x_io_resume(struct pci_dev *pdev)
12429{
12430 struct net_device *dev = pci_get_drvdata(pdev);
12431 struct bnx2x *bp = netdev_priv(dev);
12432
12433 rtnl_lock();
12434
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012435 bnx2x_eeh_recover(bp);
12436
Wendy Xiong493adb12008-06-23 20:36:22 -070012437 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012438 bnx2x_nic_load(bp, LOAD_NORMAL);
Wendy Xiong493adb12008-06-23 20:36:22 -070012439
12440 netif_device_attach(dev);
12441
12442 rtnl_unlock();
12443}
12444
/* PCI error recovery (EEH) entry points */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
12450
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012451static struct pci_driver bnx2x_pci_driver = {
Wendy Xiong493adb12008-06-23 20:36:22 -070012452 .name = DRV_MODULE_NAME,
12453 .id_table = bnx2x_pci_tbl,
12454 .probe = bnx2x_init_one,
12455 .remove = __devexit_p(bnx2x_remove_one),
12456 .suspend = bnx2x_suspend,
12457 .resume = bnx2x_resume,
12458 .err_handler = &bnx2x_err_handler,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012459};
12460
12461static int __init bnx2x_init(void)
12462{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000012463 int ret;
12464
Eilon Greenstein938cf542009-08-12 08:23:37 +000012465 printk(KERN_INFO "%s", version);
12466
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080012467 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12468 if (bnx2x_wq == NULL) {
12469 printk(KERN_ERR PFX "Cannot create workqueue\n");
12470 return -ENOMEM;
12471 }
12472
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000012473 ret = pci_register_driver(&bnx2x_pci_driver);
12474 if (ret) {
12475 printk(KERN_ERR PFX "Cannot register driver\n");
12476 destroy_workqueue(bnx2x_wq);
12477 }
12478 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012479}
12480
/* Module exit point: unregister from the PCI core first so no further
 * driver callbacks can queue work, then destroy the workqueue. */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
12490
Michael Chan993ac7b2009-10-10 13:46:56 +000012491#ifdef BCM_CNIC
12492
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	/* each completion returns one of CNIC's slow-path queue credits */
	bp->cnic_spq_pending -= count;

	/* drain queued CNIC kwqes onto the SPQ while credits remain */
	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* advance the consumer with wrap-around */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
12528
/* bnx2x_cnic_sp_queue - CNIC entry point for submitting 16-byte kwqes.
 *
 * Copies up to @count kwqes into the driver-local cnic_kwq ring
 * (bounded by MAX_SP_DESC_CNT) and, if slow-path credits are
 * available, kicks bnx2x_cnic_sp_post() to forward them to the SPQ.
 * Returns the number of kwqes actually accepted.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* local ring full - stop and report the partial count */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		/* advance the producer with wrap-around */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
12571
12572static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12573{
12574 struct cnic_ops *c_ops;
12575 int rc = 0;
12576
12577 mutex_lock(&bp->cnic_mutex);
12578 c_ops = bp->cnic_ops;
12579 if (c_ops)
12580 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12581 mutex_unlock(&bp->cnic_mutex);
12582
12583 return rc;
12584}
12585
/* Deliver a control event to CNIC from bottom-half context; uses RCU
 * (rather than the sleeping cnic_mutex) to dereference cnic_ops - see
 * the synchronize_rcu() in bnx2x_unregister_cnic(). */
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}
12599
12600/*
12601 * for commands that have no data
12602 */
12603static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12604{
12605 struct cnic_ctl_info ctl = {0};
12606
12607 ctl.cmd = cmd;
12608
12609 return bnx2x_cnic_ctl_send(bp, &ctl);
12610}
12611
12612static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12613{
12614 struct cnic_ctl_info ctl;
12615
12616 /* first we tell CNIC and only then we count this as a completion */
12617 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12618 ctl.data.comp.cid = cid;
12619
12620 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12621 bnx2x_cnic_sp_post(bp, 1);
12622}
12623
/* bnx2x_drv_ctl - dispatcher for control requests issued by CNIC.
 * Returns 0 on success, -EINVAL for an unknown command. */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		/* write one context (ILT) table entry */
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		/* CNIC reports completions, freeing slow-path credits */
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		/* include the CNIC L2 client in the rx-mode mask */
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		/* remove the CNIC L2 client from the rx-mode mask */
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
12670
/* Describe our interrupt resources to CNIC: irq_arr[0] carries the
 * CNIC status block (with MSI-X vector 1 when MSI-X is in use),
 * irq_arr[1] carries the default status block. */
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
12690
/* bnx2x_register_cnic - CNIC entry point: attach a CNIC instance.
 *
 * Allocates and resets the kwq ring, initializes the CNIC status
 * block, programs the iSCSI MAC and finally publishes @ops via RCU so
 * bnx2x_cnic_ctl_send*() start delivering events.
 * Returns 0, -EINVAL (no ops), -EBUSY or -ENOMEM.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* NOTE(review): nonzero intr_sem appears to mean interrupts are
	 * disabled (device transitioning) - refuse registration then */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* empty ring: producer == consumer, last points past the ring */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* publish ops last - this enables event delivery */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
12728
/* bnx2x_unregister_cnic - CNIC entry point: detach the CNIC instance.
 *
 * Clears the iSCSI MAC, publishes a NULL ops pointer and waits for
 * in-flight RCU readers (bnx2x_cnic_ctl_send_bh()) before freeing the
 * kwq ring.  Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* wait for readers that may still hold the old cnic_ops */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
12748
/* bnx2x_cnic_probe - export this device's CNIC parameters.
 *
 * Fills in the cnic_eth_dev descriptor (register windows, context
 * table layout, kwqe/ctl entry points) that the cnic module uses to
 * drive this device, and returns a pointer to it.
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
12772
12773#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012774