blob: 77ba13520d87ae539854da167551892655b3ec3d [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Eilon Greensteind05c26c2009-01-17 23:26:13 -08003 * Copyright (c) 2007-2009 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080041#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070045#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020046#include <linux/workqueue.h>
47#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070048#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020049#include <linux/prefetch.h>
50#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020051#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000052#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020053
Eilon Greenstein359d8b12009-02-12 08:38:25 +000054
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020055#include "bnx2x.h"
56#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070057#include "bnx2x_init_ops.h"
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000058#include "bnx2x_dump.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020059
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000060#define DRV_MODULE_VERSION "1.52.1-5"
Eilon Greenstein0ab365f2009-11-09 06:09:37 +000061#define DRV_MODULE_RELDATE "2009/11/09"
Eilon Greenstein34f80b02008-06-23 20:33:01 -070062#define BNX2X_BC_VER 0x040200
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020063
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070064#include <linux/firmware.h>
65#include "bnx2x_fw_file_hdr.h"
66/* FW files */
Ben Hutchings45229b42009-11-07 11:53:39 +000067#define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070074
Eilon Greenstein34f80b02008-06-23 20:33:01 -070075/* Time in jiffies before concluding the transmitter is hung */
76#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020077
Andrew Morton53a10562008-02-09 23:16:41 -080078static char version[] __devinitdata =
Eilon Greenstein34f80b02008-06-23 20:33:01 -070079 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020080 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
81
Eilon Greenstein24e3fce2008-06-12 14:30:28 -070082MODULE_AUTHOR("Eliezer Tamir");
Eilon Greensteine47d7e62009-01-14 06:44:28 +000083MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020084MODULE_LICENSE("GPL");
85MODULE_VERSION(DRV_MODULE_VERSION);
Ben Hutchings45229b42009-11-07 11:53:39 +000086MODULE_FIRMWARE(FW_FILE_NAME_E1);
87MODULE_FIRMWARE(FW_FILE_NAME_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020088
Eilon Greenstein555f6c72009-02-12 08:36:11 +000089static int multi_mode = 1;
90module_param(multi_mode, int, 0);
Eilon Greensteinca003922009-08-12 22:53:28 -070091MODULE_PARM_DESC(multi_mode, " Multi queue mode "
92 "(0 Disable; 1 Enable (default))");
93
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000094static int num_queues;
95module_param(num_queues, int, 0);
96MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
97 " (default is as a number of CPUs)");
Eilon Greenstein555f6c72009-02-12 08:36:11 +000098
Eilon Greenstein19680c42008-08-13 15:47:33 -070099static int disable_tpa;
Eilon Greenstein19680c42008-08-13 15:47:33 -0700100module_param(disable_tpa, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000101MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
Eilon Greenstein8badd272009-02-12 08:36:15 +0000102
103static int int_mode;
104module_param(int_mode, int, 0);
105MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
106
Eilon Greensteina18f5122009-08-12 08:23:26 +0000107static int dropless_fc;
108module_param(dropless_fc, int, 0);
109MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
110
Eilon Greenstein9898f862009-02-12 08:38:27 +0000111static int poll;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200112module_param(poll, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000113MODULE_PARM_DESC(poll, " Use polling (for debug)");
Eilon Greenstein8d5726c2009-02-12 08:37:19 +0000114
115static int mrrs = -1;
116module_param(mrrs, int, 0);
117MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118
Eilon Greenstein9898f862009-02-12 08:38:27 +0000119static int debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200120module_param(debug, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000121MODULE_PARM_DESC(debug, " Default debug msglevel");
122
123static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200124
Eilon Greenstein1cf167f2009-01-14 21:22:18 -0800125static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200126
127enum bnx2x_board_type {
128 BCM57710 = 0,
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700129 BCM57711 = 1,
130 BCM57711E = 2,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200131};
132
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700133/* indexed by board_type, above */
Andrew Morton53a10562008-02-09 23:16:41 -0800134static struct {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200135 char *name;
136} board_info[] __devinitdata = {
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700137 { "Broadcom NetXtreme II BCM57710 XGb" },
138 { "Broadcom NetXtreme II BCM57711 XGb" },
139 { "Broadcom NetXtreme II BCM57711E XGb" }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200140};
141
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700142
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200143static const struct pci_device_id bnx2x_pci_tbl[] = {
Eilon Greensteine4ed7112009-08-12 08:24:10 +0000144 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200147 { 0 }
148};
149
150MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
151
152/****************************************************************************
153* General service functions
154****************************************************************************/
155
156/* used only at init
157 * locking is done by mcp
158 */
Eilon Greenstein573f2032009-08-12 08:24:14 +0000159void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200160{
161 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
162 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
163 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
164 PCICFG_VENDOR_ID_OFFSET);
165}
166
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200167static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
168{
169 u32 val;
170
171 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
172 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
173 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
174 PCICFG_VENDOR_ID_OFFSET);
175
176 return val;
177}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200178
179static const u32 dmae_reg_go_c[] = {
180 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
181 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
182 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
183 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
184};
185
186/* copy command into DMAE command memory and set DMAE command go */
187static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
188 int idx)
189{
190 u32 cmd_offset;
191 int i;
192
193 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
194 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
195 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
196
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700197 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
198 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200199 }
200 REG_WR(bp, dmae_reg_go_c[idx], 1);
201}
202
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700203void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
204 u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200205{
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000206 struct dmae_command dmae;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200207 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700208 int cnt = 200;
209
210 if (!bp->dmae_ready) {
211 u32 *data = bnx2x_sp(bp, wb_data[0]);
212
213 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
214 " using indirect\n", dst_addr, len32);
215 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
216 return;
217 }
218
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000219 memset(&dmae, 0, sizeof(struct dmae_command));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200220
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000221 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
222 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
223 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200224#ifdef __BIG_ENDIAN
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000225 DMAE_CMD_ENDIANITY_B_DW_SWAP |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200226#else
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000227 DMAE_CMD_ENDIANITY_DW_SWAP |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200228#endif
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000229 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
230 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
231 dmae.src_addr_lo = U64_LO(dma_addr);
232 dmae.src_addr_hi = U64_HI(dma_addr);
233 dmae.dst_addr_lo = dst_addr >> 2;
234 dmae.dst_addr_hi = 0;
235 dmae.len = len32;
236 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
237 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
238 dmae.comp_val = DMAE_COMP_VAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200239
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000240 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200241 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
242 "dst_addr [%x:%08x (%08x)]\n"
243 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000244 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
245 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
246 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700247 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200248 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
249 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200250
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000251 mutex_lock(&bp->dmae_mutex);
252
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200253 *wb_comp = 0;
254
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000255 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200256
257 udelay(5);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700258
259 while (*wb_comp != DMAE_COMP_VAL) {
260 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
261
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700262 if (!cnt) {
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000263 BNX2X_ERR("DMAE timeout!\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200264 break;
265 }
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700266 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -0700267 /* adjust delay for emulation/FPGA */
268 if (CHIP_REV_IS_SLOW(bp))
269 msleep(100);
270 else
271 udelay(5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200272 }
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700273
274 mutex_unlock(&bp->dmae_mutex);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200275}
276
Yaniv Rosnerc18487e2008-06-23 20:27:52 -0700277void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200278{
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000279 struct dmae_command dmae;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200280 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700281 int cnt = 200;
282
283 if (!bp->dmae_ready) {
284 u32 *data = bnx2x_sp(bp, wb_data[0]);
285 int i;
286
287 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
288 " using indirect\n", src_addr, len32);
289 for (i = 0; i < len32; i++)
290 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
291 return;
292 }
293
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000294 memset(&dmae, 0, sizeof(struct dmae_command));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200295
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000296 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
297 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
298 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200299#ifdef __BIG_ENDIAN
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000300 DMAE_CMD_ENDIANITY_B_DW_SWAP |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200301#else
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000302 DMAE_CMD_ENDIANITY_DW_SWAP |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200303#endif
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000304 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
305 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
306 dmae.src_addr_lo = src_addr >> 2;
307 dmae.src_addr_hi = 0;
308 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
309 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
310 dmae.len = len32;
311 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
312 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
313 dmae.comp_val = DMAE_COMP_VAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200314
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000315 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200316 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
317 "dst_addr [%x:%08x (%08x)]\n"
318 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000319 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
320 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
321 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200322
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000323 mutex_lock(&bp->dmae_mutex);
324
325 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200326 *wb_comp = 0;
327
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000328 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200329
330 udelay(5);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700331
332 while (*wb_comp != DMAE_COMP_VAL) {
333
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700334 if (!cnt) {
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000335 BNX2X_ERR("DMAE timeout!\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200336 break;
337 }
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700338 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -0700339 /* adjust delay for emulation/FPGA */
340 if (CHIP_REV_IS_SLOW(bp))
341 msleep(100);
342 else
343 udelay(5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200344 }
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700345 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200346 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
347 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700348
349 mutex_unlock(&bp->dmae_mutex);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200350}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200351
Eilon Greenstein573f2032009-08-12 08:24:14 +0000352void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
353 u32 addr, u32 len)
354{
355 int offset = 0;
356
357 while (len > DMAE_LEN32_WR_MAX) {
358 bnx2x_write_dmae(bp, phys_addr + offset,
359 addr + offset, DMAE_LEN32_WR_MAX);
360 offset += DMAE_LEN32_WR_MAX * 4;
361 len -= DMAE_LEN32_WR_MAX;
362 }
363
364 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
365}
366
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700367/* used only for slowpath so not inlined */
368static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
369{
370 u32 wb_write[2];
371
372 wb_write[0] = val_hi;
373 wb_write[1] = val_lo;
374 REG_WR_DMAE(bp, reg, wb_write, 2);
375}
376
#ifdef USE_WB_RD
/* Write-back read of a 64-bit value stored as a hi/lo dword pair. */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb[2];

	REG_RD_DMAE(bp, reg, wb, 2);

	return HILO_U64(wb[0], wb[1]);
}
#endif
387
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200388static int bnx2x_mc_assert(struct bnx2x *bp)
389{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200390 char last_idx;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700391 int i, rc = 0;
392 u32 row0, row1, row2, row3;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200393
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700394 /* XSTORM */
395 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
396 XSTORM_ASSERT_LIST_INDEX_OFFSET);
397 if (last_idx)
398 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200399
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700400 /* print the asserts */
401 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200402
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700403 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
404 XSTORM_ASSERT_LIST_OFFSET(i));
405 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
407 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
409 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200411
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700412 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
413 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
414 " 0x%08x 0x%08x 0x%08x\n",
415 i, row3, row2, row1, row0);
416 rc++;
417 } else {
418 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200419 }
420 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700421
422 /* TSTORM */
423 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
424 TSTORM_ASSERT_LIST_INDEX_OFFSET);
425 if (last_idx)
426 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
427
428 /* print the asserts */
429 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
430
431 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
432 TSTORM_ASSERT_LIST_OFFSET(i));
433 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
435 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
437 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
439
440 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
441 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
442 " 0x%08x 0x%08x 0x%08x\n",
443 i, row3, row2, row1, row0);
444 rc++;
445 } else {
446 break;
447 }
448 }
449
450 /* CSTORM */
451 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
452 CSTORM_ASSERT_LIST_INDEX_OFFSET);
453 if (last_idx)
454 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
455
456 /* print the asserts */
457 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
458
459 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
460 CSTORM_ASSERT_LIST_OFFSET(i));
461 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
463 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
465 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
467
468 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
469 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
470 " 0x%08x 0x%08x 0x%08x\n",
471 i, row3, row2, row1, row0);
472 rc++;
473 } else {
474 break;
475 }
476 }
477
478 /* USTORM */
479 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
480 USTORM_ASSERT_LIST_INDEX_OFFSET);
481 if (last_idx)
482 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
483
484 /* print the asserts */
485 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
486
487 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
488 USTORM_ASSERT_LIST_OFFSET(i));
489 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i) + 4);
491 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
492 USTORM_ASSERT_LIST_OFFSET(i) + 8);
493 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
494 USTORM_ASSERT_LIST_OFFSET(i) + 12);
495
496 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
497 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
498 " 0x%08x 0x%08x 0x%08x\n",
499 i, row3, row2, row1, row0);
500 rc++;
501 } else {
502 break;
503 }
504 }
505
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200506 return rc;
507}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800508
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200509static void bnx2x_fw_dump(struct bnx2x *bp)
510{
511 u32 mark, offset;
Eilon Greenstein4781bfa2009-02-12 08:38:17 +0000512 __be32 data[9];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200513 int word;
514
515 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
Eliezer Tamir49d66772008-02-28 11:53:13 -0800516 mark = ((mark + 0x3) & ~0x3);
Joe Perchesad361c92009-07-06 13:05:40 -0700517 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200518
Joe Perchesad361c92009-07-06 13:05:40 -0700519 printk(KERN_ERR PFX);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200520 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
521 for (word = 0; word < 8; word++)
522 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
523 offset + 4*word));
524 data[8] = 0x0;
Eliezer Tamir49d66772008-02-28 11:53:13 -0800525 printk(KERN_CONT "%s", (char *)data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200526 }
527 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
528 for (word = 0; word < 8; word++)
529 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
530 offset + 4*word));
531 data[8] = 0x0;
Eliezer Tamir49d66772008-02-28 11:53:13 -0800532 printk(KERN_CONT "%s", (char *)data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200533 }
Joe Perchesad361c92009-07-06 13:05:40 -0700534 printk(KERN_ERR PFX "end of fw dump\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200535}
536
537static void bnx2x_panic_dump(struct bnx2x *bp)
538{
539 int i;
540 u16 j, start, end;
541
Yitchak Gertner66e855f2008-08-13 15:49:05 -0700542 bp->stats_state = STATS_STATE_DISABLED;
543 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
544
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200545 BNX2X_ERR("begin crash dump -----------------\n");
546
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000547 /* Indices */
548 /* Common */
549 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
550 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
551 " spq_prod_idx(%u)\n",
552 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
553 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
554
555 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000556 for_each_queue(bp, i) {
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000557 struct bnx2x_fastpath *fp = &bp->fp[i];
558
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000559 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000560 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
561 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
562 i, fp->rx_bd_prod, fp->rx_bd_cons,
563 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
564 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000565 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000566 " fp_u_idx(%x) *sb_u_idx(%x)\n",
567 fp->rx_sge_prod, fp->last_max_sge,
568 le16_to_cpu(fp->fp_u_idx),
569 fp->status_blk->u_status_block.status_block_index);
570 }
571
572 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000573 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200574 struct bnx2x_fastpath *fp = &bp->fp[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200575
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000576 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700577 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200578 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700579 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000580 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
Eilon Greensteinca003922009-08-12 22:53:28 -0700581 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
Yitchak Gertner66e855f2008-08-13 15:49:05 -0700582 fp->status_blk->c_status_block.status_block_index,
Eilon Greensteinca003922009-08-12 22:53:28 -0700583 fp->tx_db.data.prod);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000584 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200585
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000586 /* Rings */
587 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000588 for_each_queue(bp, i) {
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000589 struct bnx2x_fastpath *fp = &bp->fp[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200590
591 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
592 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000593 for (j = start; j != end; j = RX_BD(j + 1)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200594 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
595 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
596
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000597 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
598 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200599 }
600
Eilon Greenstein3196a882008-08-13 15:58:49 -0700601 start = RX_SGE(fp->rx_sge_prod);
602 end = RX_SGE(fp->last_max_sge);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000603 for (j = start; j != end; j = RX_SGE(j + 1)) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -0700604 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
605 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
606
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000607 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
608 i, j, rx_sge[1], rx_sge[0], sw_page->page);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -0700609 }
610
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200611 start = RCQ_BD(fp->rx_comp_cons - 10);
612 end = RCQ_BD(fp->rx_comp_cons + 503);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000613 for (j = start; j != end; j = RCQ_BD(j + 1)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200614 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
615
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000616 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
617 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200618 }
619 }
620
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000621 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000622 for_each_queue(bp, i) {
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000623 struct bnx2x_fastpath *fp = &bp->fp[i];
624
625 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
626 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
627 for (j = start; j != end; j = TX_BD(j + 1)) {
628 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
629
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000630 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
631 i, j, sw_bd->skb, sw_bd->first_bd);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000632 }
633
634 start = TX_BD(fp->tx_bd_cons - 10);
635 end = TX_BD(fp->tx_bd_cons + 254);
636 for (j = start; j != end; j = TX_BD(j + 1)) {
637 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
638
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000639 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
640 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
Eilon Greenstein8440d2b2009-02-12 08:38:22 +0000641 }
642 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200643
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700644 bnx2x_fw_dump(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200645 bnx2x_mc_assert(bp);
646 BNX2X_ERR("end crash dump -----------------\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200647}
648
/* Enable host-coalescer (HC) interrupts for this port.
 *
 * Programs the per-port HC_CONFIG register according to the active
 * interrupt mode (MSI-X / MSI / INTx) and, on E1H chips, initializes
 * the leading/trailing edge registers.  The mmiowb()/barrier() pairs
 * enforce ordering of the MMIO writes towards the chip.
 */
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		/* MSI-X: single-ISR and INTx line off; MSI/MSI-X + attn on */
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		/* MSI: INTx line off; single ISR + MSI + attn on */
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		/* INTx: intermediate write with the MSI/MSI-X bit set ... */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		/* ... then clear that bit for the final write below.
		 * NOTE(review): the two-step write looks deliberate
		 * (presumably a HW requirement) - confirm before changing.
		 */
		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			/* multi-function: unmask this VN's bit (bits 4-7) */
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
708
/* Disable all host-coalescer interrupt sources for this port.
 *
 * Clears every interrupt-enable bit in the per-port HC_CONFIG register
 * and reads the register back to verify the write took effect.
 */
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	/* read back to confirm the disable reached the chip */
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
730
/* Quiesce all interrupt handling.
 *
 * Bumps bp->intr_sem so ISRs bail out, optionally masks interrupts in
 * HW (@disable_hw), waits for in-flight ISRs via synchronize_irq(), and
 * stops the slowpath task.  Must be callable from process context since
 * it sleeps in synchronize_irq()/flush_workqueue().
 */
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is the slowpath; queue vectors follow (CNIC,
		 * when built in, occupies one extra slot before them) */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
760
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700761/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200762
763/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700764 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200765 */
766
/* Acknowledge a status block to the IGU.
 *
 * Builds an igu_ack_register word (status block id, storm, new index,
 * interrupt mode and update flag) and writes it to the per-port HC
 * command register.  The trailing mmiowb()/barrier() make sure the ACK
 * is posted before the caller proceeds.
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
789
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000790static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200791{
792 struct host_status_block *fpsb = fp->status_blk;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200793
794 barrier(); /* status block is written to by the chip */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000795 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
796 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200797}
798
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200799static u16 bnx2x_ack_int(struct bnx2x *bp)
800{
Eilon Greenstein5c862842008-08-13 15:51:48 -0700801 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
802 COMMAND_REG_SIMD_MASK);
803 u32 result = REG_RD(bp, hc_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200804
Eilon Greenstein5c862842008-08-13 15:51:48 -0700805 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
806 result, hc_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200807
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200808 return result;
809}
810
811
812/*
813 * fast path service functions
814 */
815
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -0800816static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
817{
818 /* Tell compiler that consumer and producer can change */
819 barrier();
820 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
Eilon Greenstein237907c2009-01-14 06:42:44 +0000821}
822
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 *
 * Walks the chain of buffer descriptors that carried one TX packet:
 * unmaps the start BD, skips the parse BD (and the TSO split-header BD
 * when present, since neither has a DMA mapping), unmaps every frag BD,
 * then frees the skb and clears the ring slot.
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	/* nbd counts the BDs after the start BD */
	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	/* consumer index right after this packet's last BD */
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
889
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700890static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200891{
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700892 s16 used;
893 u16 prod;
894 u16 cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200895
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700896 barrier(); /* Tell compiler that prod and cons can change */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200897 prod = fp->tx_bd_prod;
898 cons = fp->tx_bd_cons;
899
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700900 /* NUM_TX_RINGS = number of "next-page" entries
901 It will be used as a threshold */
902 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200903
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700904#ifdef BNX2X_STOP_ON_ERROR
Ilpo Järvinen53e5e962008-07-25 21:40:45 -0700905 WARN_ON(used < 0);
906 WARN_ON(used > fp->bp->tx_ring_size);
907 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700908#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200909
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700910 return (s16)(fp->bp->tx_ring_size) - used;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200911}
912
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000913static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
914{
915 u16 hw_cons;
916
917 /* Tell compiler that status block fields can change */
918 barrier();
919 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
920 return hw_cons != fp->tx_pkt_cons;
921}
922
/* Reap completed TX packets on one fastpath ring.
 *
 * Frees every packet between the software consumer and the consumer
 * index reported in the status block, updates the ring indices, and
 * wakes the netdev TX queue if it was stopped and enough BDs freed up.
 * Returns 0, or -1 when the driver has paniced (BNX2X_STOP_ON_ERROR).
 */
static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		/* re-check under the barrier; wake only when the device is
		 * open and there is room for a worst-case packet */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
	return 0;
}
978
Michael Chan993ac7b2009-10-10 13:46:56 +0000979#ifdef BCM_CNIC
980static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
981#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -0700982
/* Handle a slowpath (ramrod) completion CQE.
 *
 * Decodes the connection id and command from the ramrod CQE and drives
 * the per-fastpath state machine (for non-leading queues) or the global
 * bp->state machine (for the leading queue / CID 0 path).  The mb()
 * calls make the state change visible to bnx2x_wait_ramrod() pollers.
 */
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* a slowpath queue slot is released by this completion */
	bp->spq_left++;

	if (fp->index) {
		/* non-leading queue: only setup/halt are expected */
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		/* publish the decrement before waiters re-check it */
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
1065
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001066static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1067 struct bnx2x_fastpath *fp, u16 index)
1068{
1069 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1070 struct page *page = sw_buf->page;
1071 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1072
1073 /* Skip "next page" elements */
1074 if (!page)
1075 return;
1076
1077 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001078 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001079 __free_pages(page, PAGES_PER_SGE_SHIFT);
1080
1081 sw_buf->page = NULL;
1082 sge->addr_hi = 0;
1083 sge->addr_lo = 0;
1084}
1085
/* Release SGE pages [0, last) of a fastpath's page ring */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int idx;

	for (idx = 0; idx < last; idx++)
		bnx2x_free_rx_sge(bp, fp, idx);
}
1094
1095static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1096 struct bnx2x_fastpath *fp, u16 index)
1097{
1098 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1099 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1100 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1101 dma_addr_t mapping;
1102
1103 if (unlikely(page == NULL))
1104 return -ENOMEM;
1105
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001106 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001107 PCI_DMA_FROMDEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001108 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001109 __free_pages(page, PAGES_PER_SGE_SHIFT);
1110 return -ENOMEM;
1111 }
1112
1113 sw_buf->page = page;
1114 pci_unmap_addr_set(sw_buf, mapping, mapping);
1115
1116 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1117 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1118
1119 return 0;
1120}
1121
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001122static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1123 struct bnx2x_fastpath *fp, u16 index)
1124{
1125 struct sk_buff *skb;
1126 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1127 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1128 dma_addr_t mapping;
1129
1130 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1131 if (unlikely(skb == NULL))
1132 return -ENOMEM;
1133
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07001134 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001135 PCI_DMA_FROMDEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001136 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001137 dev_kfree_skb(skb);
1138 return -ENOMEM;
1139 }
1140
1141 rx_buf->skb = skb;
1142 pci_unmap_addr_set(rx_buf, mapping, mapping);
1143
1144 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1145 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1146
1147 return 0;
1148}
1149
1150/* note that we are not allocating a new skb,
1151 * we are just moving one from cons to prod
1152 * we are not creating a new mapping,
1153 * so there is no need to check for dma_mapping_error().
1154 */
1155static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1156 struct sk_buff *skb, u16 cons, u16 prod)
1157{
1158 struct bnx2x *bp = fp->bp;
1159 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1160 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1161 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1162 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1163
1164 pci_dma_sync_single_for_device(bp->pdev,
1165 pci_unmap_addr(cons_rx_buf, mapping),
Eilon Greenstein87942b42009-02-12 08:36:49 +00001166 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001167
1168 prod_rx_buf->skb = cons_rx_buf->skb;
1169 pci_unmap_addr_set(prod_rx_buf, mapping,
1170 pci_unmap_addr(cons_rx_buf, mapping));
1171 *prod_bd = *cons_bd;
1172}
1173
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001174static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1175 u16 idx)
1176{
1177 u16 last_max = fp->last_max_sge;
1178
1179 if (SUB_S16(idx, last_max) > 0)
1180 fp->last_max_sge = idx;
1181}
1182
1183static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1184{
1185 int i, j;
1186
1187 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1188 int idx = RX_SGE_CNT * i - 1;
1189
1190 for (j = 0; j < 2; j++) {
1191 SGE_MASK_CLEAR_BIT(fp, idx);
1192 idx--;
1193 }
1194 }
1195}
1196
/* Advance the SGE producer after a TPA/LRO completion.
 *
 * Marks every SGE consumed by this CQE in the bitmask, then walks the
 * mask from the current producer and pushes the producer forward over
 * every fully-consumed mask element, re-arming those elements.
 */
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	/* number of SGE pages used by the part of the packet beyond the BD */
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		/* stop at the first element that still has unconsumed bits */
		if (likely(fp->sge_mask[i]))
			break;

		/* re-arm the element and account its span */
		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
1249
1250static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1251{
1252 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1253 memset(fp->sge_mask, 0xff,
1254 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1255
Eilon Greenstein33471622008-08-13 15:59:08 -07001256 /* Clear the two last indices in the page to 1:
1257 these are the indices that correspond to the "next" element,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001258 hence will never be indicated and should be removed from
1259 the calculations. */
1260 bnx2x_clear_sge_mask_next_elems(fp);
1261}
1262
/* Begin TPA (LRO) aggregation on @queue.
 *
 * Swaps buffers: the pre-allocated empty skb from the TPA pool takes
 * the prod ring slot (and is mapped for DMA), while the skb at cons -
 * which holds the first part of the aggregated packet - is parked in
 * the pool, still mapped, until bnx2x_tpa_stop() completes it.
 * NOTE(review): the @skb argument is unused here - presumably kept for
 * signature symmetry with the stop path; confirm with callers.
 */
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
1301
/* Attach the SGE pages of an aggregated (TPA) packet to @skb as frags.
 *
 * Walks the CQE's SGL, replaces each consumed page in the SGE ring with
 * a freshly allocated one, unmaps the old page and hands it to the skb.
 * Returns 0 on success or a negative errno if a replacement page could
 * not be allocated (the packet is then dropped by the caller).
 */
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	/* bytes carried by the SGEs (everything beyond the first BD) */
	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
1367
/* Complete a TPA (LRO) aggregation on @queue and pass the skb up.
 *
 * Unmaps the parked pool skb, fixes up its length/IP checksum, attaches
 * the SGE frags via bnx2x_fill_frag_skb() and hands the result to the
 * stack (with HW VLAN acceleration when applicable).  A replacement skb
 * is allocated for the pool; if that allocation fails the aggregated
 * packet is dropped and the old buffer stays in the bin.  In all cases
 * the bin state returns to BNX2X_TPA_STOP.
 */
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		/* HW aggregated this flow, so checksum is trusted */
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			/* recompute the IP header checksum after aggregation */
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
1457
/* Publish new Rx BD / CQE / SGE producer indices to the chip.
 *
 * The three producers are packed into a ustorm_eth_rx_producers struct
 * and written word-by-word into USTORM internal memory at this
 * port/client's slot, telling the FW that fresh buffers are available.
 * Ordering (wmb before the writes, mmiowb after) is mandatory — do not
 * reorder these statements.
 */
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since FW will
         * assumes BDs must have buffers.
         */
        wmb();

        /* Write the producer block one 32-bit word at a time */
        for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_USTRORM_INTMEM +
                       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
                       ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
           fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
1492
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001493static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1494{
1495 struct bnx2x *bp = fp->bp;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001496 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001497 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1498 int rx_pkt = 0;
1499
1500#ifdef BNX2X_STOP_ON_ERROR
1501 if (unlikely(bp->panic))
1502 return 0;
1503#endif
1504
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001505 /* CQ "next element" is of the size of the regular element,
1506 that's why it's ok here */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001507 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1508 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1509 hw_comp_cons++;
1510
1511 bd_cons = fp->rx_bd_cons;
1512 bd_prod = fp->rx_bd_prod;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001513 bd_prod_fw = bd_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001514 sw_comp_cons = fp->rx_comp_cons;
1515 sw_comp_prod = fp->rx_comp_prod;
1516
1517 /* Memory barrier necessary as speculative reads of the rx
1518 * buffer can be ahead of the index in the status block
1519 */
1520 rmb();
1521
1522 DP(NETIF_MSG_RX_STATUS,
1523 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
Eilon Greenstein0626b892009-02-12 08:38:14 +00001524 fp->index, hw_comp_cons, sw_comp_cons);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001525
1526 while (sw_comp_cons != hw_comp_cons) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001527 struct sw_rx_bd *rx_buf = NULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001528 struct sk_buff *skb;
1529 union eth_rx_cqe *cqe;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001530 u8 cqe_fp_flags;
1531 u16 len, pad;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001532
1533 comp_ring_cons = RCQ_BD(sw_comp_cons);
1534 bd_prod = RX_BD(bd_prod);
1535 bd_cons = RX_BD(bd_cons);
1536
Eilon Greenstein619e7a62009-08-12 08:23:20 +00001537 /* Prefetch the page containing the BD descriptor
1538 at producer's index. It will be needed when new skb is
1539 allocated */
1540 prefetch((void *)(PAGE_ALIGN((unsigned long)
1541 (&fp->rx_desc_ring[bd_prod])) -
1542 PAGE_SIZE + 1));
1543
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001544 cqe = &fp->rx_comp_ring[comp_ring_cons];
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001545 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001546
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001547 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001548 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1549 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
Eilon Greenstein68d59482009-01-14 21:27:36 -08001550 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001551 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1552 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001553
1554 /* is this a slowpath msg? */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001555 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001556 bnx2x_sp_event(fp, cqe);
1557 goto next_cqe;
1558
1559 /* this is an rx packet */
1560 } else {
1561 rx_buf = &fp->rx_buf_ring[bd_cons];
1562 skb = rx_buf->skb;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001563 prefetch(skb);
1564 prefetch((u8 *)skb + 256);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001565 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1566 pad = cqe->fast_path_cqe.placement_offset;
1567
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001568 /* If CQE is marked both TPA_START and TPA_END
1569 it is a non-TPA CQE */
1570 if ((!fp->disable_tpa) &&
1571 (TPA_TYPE(cqe_fp_flags) !=
1572 (TPA_TYPE_START | TPA_TYPE_END))) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07001573 u16 queue = cqe->fast_path_cqe.queue_index;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001574
1575 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1576 DP(NETIF_MSG_RX_STATUS,
1577 "calling tpa_start on queue %d\n",
1578 queue);
1579
1580 bnx2x_tpa_start(fp, queue, skb,
1581 bd_cons, bd_prod);
1582 goto next_rx;
1583 }
1584
1585 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1586 DP(NETIF_MSG_RX_STATUS,
1587 "calling tpa_stop on queue %d\n",
1588 queue);
1589
1590 if (!BNX2X_RX_SUM_FIX(cqe))
1591 BNX2X_ERR("STOP on none TCP "
1592 "data\n");
1593
1594 /* This is a size of the linear data
1595 on this skb */
1596 len = le16_to_cpu(cqe->fast_path_cqe.
1597 len_on_bd);
1598 bnx2x_tpa_stop(bp, fp, queue, pad,
1599 len, cqe, comp_ring_cons);
1600#ifdef BNX2X_STOP_ON_ERROR
1601 if (bp->panic)
Stanislaw Gruszka17cb40062009-05-05 23:22:12 +00001602 return 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001603#endif
1604
1605 bnx2x_update_sge_prod(fp,
1606 &cqe->fast_path_cqe);
1607 goto next_cqe;
1608 }
1609 }
1610
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001611 pci_dma_sync_single_for_device(bp->pdev,
1612 pci_unmap_addr(rx_buf, mapping),
1613 pad + RX_COPY_THRESH,
1614 PCI_DMA_FROMDEVICE);
1615 prefetch(skb);
1616 prefetch(((char *)(skb)) + 128);
1617
1618 /* is this an error packet? */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001619 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001620 DP(NETIF_MSG_RX_ERR,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001621 "ERROR flags %x rx packet %u\n",
1622 cqe_fp_flags, sw_comp_cons);
Eilon Greensteinde832a52009-02-12 08:36:33 +00001623 fp->eth_q_stats.rx_err_discard_pkt++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001624 goto reuse_rx;
1625 }
1626
1627 /* Since we don't have a jumbo ring
1628 * copy small packets if mtu > 1500
1629 */
1630 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1631 (len <= RX_COPY_THRESH)) {
1632 struct sk_buff *new_skb;
1633
1634 new_skb = netdev_alloc_skb(bp->dev,
1635 len + pad);
1636 if (new_skb == NULL) {
1637 DP(NETIF_MSG_RX_ERR,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001638 "ERROR packet dropped "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001639 "because of alloc failure\n");
Eilon Greensteinde832a52009-02-12 08:36:33 +00001640 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001641 goto reuse_rx;
1642 }
1643
1644 /* aligned copy */
1645 skb_copy_from_linear_data_offset(skb, pad,
1646 new_skb->data + pad, len);
1647 skb_reserve(new_skb, pad);
1648 skb_put(new_skb, len);
1649
1650 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1651
1652 skb = new_skb;
1653
Eilon Greensteina119a062009-08-12 08:23:23 +00001654 } else
1655 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001656 pci_unmap_single(bp->pdev,
1657 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07001658 bp->rx_buf_size,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001659 PCI_DMA_FROMDEVICE);
1660 skb_reserve(skb, pad);
1661 skb_put(skb, len);
1662
1663 } else {
1664 DP(NETIF_MSG_RX_ERR,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001665 "ERROR packet dropped because "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001666 "of alloc failure\n");
Eilon Greensteinde832a52009-02-12 08:36:33 +00001667 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001668reuse_rx:
1669 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1670 goto next_rx;
1671 }
1672
1673 skb->protocol = eth_type_trans(skb, bp->dev);
1674
1675 skb->ip_summed = CHECKSUM_NONE;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07001676 if (bp->rx_csum) {
Eilon Greenstein1adcd8b2008-08-13 15:48:29 -07001677 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1678 skb->ip_summed = CHECKSUM_UNNECESSARY;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07001679 else
Eilon Greensteinde832a52009-02-12 08:36:33 +00001680 fp->eth_q_stats.hw_csum_err++;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07001681 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001682 }
1683
Eilon Greenstein748e5432009-02-12 08:36:37 +00001684 skb_record_rx_queue(skb, fp->index);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00001685
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001686#ifdef BCM_VLAN
Eilon Greenstein0c6671b2009-01-14 21:26:51 -08001687 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001688 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1689 PARSING_FLAGS_VLAN))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001690 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1691 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1692 else
1693#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001694 netif_receive_skb(skb);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001695
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001696
1697next_rx:
1698 rx_buf->skb = NULL;
1699
1700 bd_cons = NEXT_RX_IDX(bd_cons);
1701 bd_prod = NEXT_RX_IDX(bd_prod);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001702 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1703 rx_pkt++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001704next_cqe:
1705 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1706 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001707
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001708 if (rx_pkt == budget)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001709 break;
1710 } /* while */
1711
1712 fp->rx_bd_cons = bd_cons;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001713 fp->rx_bd_prod = bd_prod_fw;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001714 fp->rx_comp_cons = sw_comp_cons;
1715 fp->rx_comp_prod = sw_comp_prod;
1716
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001717 /* Update producers */
1718 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1719 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001720
1721 fp->rx_pkt += rx_pkt;
1722 fp->rx_calls++;
1723
1724 return rx_pkt;
1725}
1726
1727static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1728{
1729 struct bnx2x_fastpath *fp = fp_cookie;
1730 struct bnx2x *bp = fp->bp;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001731
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07001732 /* Return here if interrupt is disabled */
1733 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1734 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1735 return IRQ_HANDLED;
1736 }
1737
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001738 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
Eilon Greensteinca003922009-08-12 22:53:28 -07001739 fp->index, fp->sb_id);
Eilon Greenstein0626b892009-02-12 08:38:14 +00001740 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001741
1742#ifdef BNX2X_STOP_ON_ERROR
1743 if (unlikely(bp->panic))
1744 return IRQ_HANDLED;
1745#endif
1746
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001747 /* Handle Rx and Tx according to MSI-X vector */
1748 prefetch(fp->rx_cons_sb);
1749 prefetch(fp->tx_cons_sb);
1750 prefetch(&fp->status_blk->u_status_block.status_block_index);
1751 prefetch(&fp->status_blk->c_status_block.status_block_index);
1752 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001753
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001754 return IRQ_HANDLED;
1755}
1756
/* Legacy INTx / MSI interrupt handler.
 *
 * Reads the aggregated interrupt status and dispatches: bit (0x2 <<
 * sb_id) per fastpath queue triggers that queue's NAPI, the CNIC status
 * block (when BCM_CNIC) is forwarded to the registered cnic handler via
 * RCU, and bit 0x1 schedules the slowpath task.  Returns IRQ_NONE when
 * the (possibly shared) interrupt was not ours.
 */
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
        struct bnx2x *bp = netdev_priv(dev_instance);
        u16 status = bnx2x_ack_int(bp);
        u16 mask;
        int i;

        /* Return here if interrupt is shared and it's not for us */
        if (unlikely(status == 0)) {
                DP(NETIF_MSG_INTR, "not our interrupt!\n");
                return IRQ_NONE;
        }
        DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        /* Each fastpath queue owns status bit (0x2 << sb_id) */
        for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                mask = 0x2 << fp->sb_id;
                if (status & mask) {
                        /* Handle Rx and Tx according to SB id */
                        prefetch(fp->rx_cons_sb);
                        prefetch(&fp->status_blk->u_status_block.
                                                status_block_index);
                        prefetch(fp->tx_cons_sb);
                        prefetch(&fp->status_blk->c_status_block.
                                                status_block_index);
                        napi_schedule(&bnx2x_fp(bp, fp->index, napi));
                        status &= ~mask;
                }
        }

#ifdef BCM_CNIC
        /* Forward the CNIC status block (and slowpath bit) to the
           offload driver's handler, if one is registered */
        mask = 0x2 << CNIC_SB_ID(bp);
        if (status & (mask | 0x1)) {
                struct cnic_ops *c_ops = NULL;

                rcu_read_lock();
                c_ops = rcu_dereference(bp->cnic_ops);
                if (c_ops)
                        c_ops->cnic_handler(bp->cnic_data, NULL);
                rcu_read_unlock();

                status &= ~mask;
        }
#endif

        /* Bit 0x1 indicates a slowpath event - handle in workqueue */
        if (unlikely(status & 0x1)) {
                queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

                status &= ~0x1;
                if (!status)
                        return IRQ_HANDLED;
        }

        if (status)
                DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
                   status);

        return IRQ_HANDLED;
}
1828
1829/* end of fast path */
1830
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001831static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001832
1833/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001834
1835/*
1836 * General service functions
1837 */
1838
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001839static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001840{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001841 u32 lock_status;
1842 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001843 int func = BP_FUNC(bp);
1844 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001845 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001846
1847 /* Validating that the resource is within range */
1848 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1849 DP(NETIF_MSG_HW,
1850 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1851 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1852 return -EINVAL;
1853 }
1854
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001855 if (func <= 5) {
1856 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1857 } else {
1858 hw_lock_control_reg =
1859 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1860 }
1861
Eliezer Tamirf1410642008-02-28 11:51:50 -08001862 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001863 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001864 if (lock_status & resource_bit) {
1865 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1866 lock_status, resource_bit);
1867 return -EEXIST;
1868 }
1869
Eilon Greenstein46230472008-08-25 15:23:30 -07001870 /* Try for 5 second every 5ms */
1871 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001872 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001873 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1874 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001875 if (lock_status & resource_bit)
1876 return 0;
1877
1878 msleep(5);
1879 }
1880 DP(NETIF_MSG_HW, "Timeout\n");
1881 return -EAGAIN;
1882}
1883
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001884static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001885{
1886 u32 lock_status;
1887 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001888 int func = BP_FUNC(bp);
1889 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001890
1891 /* Validating that the resource is within range */
1892 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1893 DP(NETIF_MSG_HW,
1894 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1895 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1896 return -EINVAL;
1897 }
1898
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001899 if (func <= 5) {
1900 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1901 } else {
1902 hw_lock_control_reg =
1903 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1904 }
1905
Eliezer Tamirf1410642008-02-28 11:51:50 -08001906 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001907 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001908 if (!(lock_status & resource_bit)) {
1909 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1910 lock_status, resource_bit);
1911 return -EFAULT;
1912 }
1913
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001914 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001915 return 0;
1916}
1917
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001918/* HW Lock for shared dual port PHYs */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001919static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001920{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001921 mutex_lock(&bp->port.phy_mutex);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001922
Eilon Greenstein46c6a672009-02-12 08:36:58 +00001923 if (bp->port.need_hw_lock)
1924 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001925}
1926
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001927static void bnx2x_release_phy_lock(struct bnx2x *bp)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001928{
Eilon Greenstein46c6a672009-02-12 08:36:58 +00001929 if (bp->port.need_hw_lock)
1930 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001931
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001932 mutex_unlock(&bp->port.phy_mutex);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001933}
1934
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001935int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1936{
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1943 u32 gpio_reg;
1944 int value;
1945
1946 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1947 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1948 return -EINVAL;
1949 }
1950
1951 /* read GPIO value */
1952 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1953
1954 /* get the requested pin value */
1955 if ((gpio_reg & gpio_mask) == gpio_mask)
1956 value = 1;
1957 else
1958 value = 0;
1959
1960 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1961
1962 return value;
1963}
1964
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001965int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001966{
1967 /* The GPIO should be swapped if swap register is set and active */
1968 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001969 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001970 int gpio_shift = gpio_num +
1971 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1972 u32 gpio_mask = (1 << gpio_shift);
1973 u32 gpio_reg;
1974
1975 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1976 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1977 return -EINVAL;
1978 }
1979
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001980 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001981 /* read GPIO and mask except the float bits */
1982 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1983
1984 switch (mode) {
1985 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1986 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1987 gpio_num, gpio_shift);
1988 /* clear FLOAT and set CLR */
1989 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1990 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1991 break;
1992
1993 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1994 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1995 gpio_num, gpio_shift);
1996 /* clear FLOAT and set SET */
1997 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1998 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1999 break;
2000
Eilon Greenstein17de50b2008-08-13 15:56:59 -07002001 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002002 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2003 gpio_num, gpio_shift);
2004 /* set FLOAT */
2005 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2006 break;
2007
2008 default:
2009 break;
2010 }
2011
2012 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002013 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002014
2015 return 0;
2016}
2017
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00002018int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2019{
2020 /* The GPIO should be swapped if swap register is set and active */
2021 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2022 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2023 int gpio_shift = gpio_num +
2024 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2025 u32 gpio_mask = (1 << gpio_shift);
2026 u32 gpio_reg;
2027
2028 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2029 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2030 return -EINVAL;
2031 }
2032
2033 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2034 /* read GPIO int */
2035 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2036
2037 switch (mode) {
2038 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2039 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2040 "output low\n", gpio_num, gpio_shift);
2041 /* clear SET and set CLR */
2042 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2043 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2044 break;
2045
2046 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2047 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2048 "output high\n", gpio_num, gpio_shift);
2049 /* clear CLR and set SET */
2050 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2051 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2052 break;
2053
2054 default:
2055 break;
2056 }
2057
2058 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2059 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2060
2061 return 0;
2062}
2063
Eliezer Tamirf1410642008-02-28 11:51:50 -08002064static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2065{
2066 u32 spio_mask = (1 << spio_num);
2067 u32 spio_reg;
2068
2069 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2070 (spio_num > MISC_REGISTERS_SPIO_7)) {
2071 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2072 return -EINVAL;
2073 }
2074
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002075 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002076 /* read SPIO and mask except the float bits */
2077 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2078
2079 switch (mode) {
Eilon Greenstein6378c022008-08-13 15:59:25 -07002080 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002081 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2082 /* clear FLOAT and set CLR */
2083 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2084 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2085 break;
2086
Eilon Greenstein6378c022008-08-13 15:59:25 -07002087 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002088 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2089 /* clear FLOAT and set SET */
2090 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2091 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2092 break;
2093
2094 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2095 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2096 /* set FLOAT */
2097 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2098 break;
2099
2100 default:
2101 break;
2102 }
2103
2104 REG_WR(bp, MISC_REG_SPIO, spio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002105 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002106
2107 return 0;
2108}
2109
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002110static void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002111{
Eilon Greensteinad33ea32009-01-14 21:24:57 -08002112 switch (bp->link_vars.ieee_fc &
2113 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002114 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002115 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002116 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002117 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002118
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002119 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002120 bp->port.advertising |= (ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002121 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002122 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002123
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002124 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002125 bp->port.advertising |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08002126 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002127
Eliezer Tamirf1410642008-02-28 11:51:50 -08002128 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002129 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002130 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002131 break;
2132 }
2133}
2134
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002135static void bnx2x_link_report(struct bnx2x *bp)
2136{
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002137 if (bp->flags & MF_FUNC_DIS) {
Eilon Greenstein2691d512009-08-12 08:22:08 +00002138 netif_carrier_off(bp->dev);
2139 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2140 return;
2141 }
2142
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002143 if (bp->link_vars.link_up) {
Eilon Greenstein35c5f8f2009-10-15 00:19:05 -07002144 u16 line_speed;
2145
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002146 if (bp->state == BNX2X_STATE_OPEN)
2147 netif_carrier_on(bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002148 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2149
Eilon Greenstein35c5f8f2009-10-15 00:19:05 -07002150 line_speed = bp->link_vars.line_speed;
2151 if (IS_E1HMF(bp)) {
2152 u16 vn_max_rate;
2153
2154 vn_max_rate =
2155 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2156 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2157 if (vn_max_rate < line_speed)
2158 line_speed = vn_max_rate;
2159 }
2160 printk("%d Mbps ", line_speed);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002161
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002162 if (bp->link_vars.duplex == DUPLEX_FULL)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002163 printk("full duplex");
2164 else
2165 printk("half duplex");
2166
David S. Millerc0700f92008-12-16 23:53:20 -08002167 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2168 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002169 printk(", receive ");
Eilon Greenstein356e2382009-02-12 08:38:32 +00002170 if (bp->link_vars.flow_ctrl &
2171 BNX2X_FLOW_CTRL_TX)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002172 printk("& transmit ");
2173 } else {
2174 printk(", transmit ");
2175 }
2176 printk("flow control ON");
2177 }
2178 printk("\n");
2179
2180 } else { /* link_down */
2181 netif_carrier_off(bp->dev);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002182 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002183 }
2184}
2185
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00002186static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002187{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002188 if (!BP_NOMCP(bp)) {
2189 u8 rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002190
Eilon Greenstein19680c42008-08-13 15:47:33 -07002191 /* Initialize link parameters structure variables */
Yaniv Rosner8c99e7b2008-08-13 15:56:17 -07002192 /* It is recommended to turn off RX FC for jumbo frames
2193 for better performance */
Eilon Greenstein0c593272009-08-12 08:22:13 +00002194 if (bp->dev->mtu > 5000)
David S. Millerc0700f92008-12-16 23:53:20 -08002195 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
Yaniv Rosner8c99e7b2008-08-13 15:56:17 -07002196 else
David S. Millerc0700f92008-12-16 23:53:20 -08002197 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002198
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002199 bnx2x_acquire_phy_lock(bp);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00002200
2201 if (load_mode == LOAD_DIAG)
2202 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2203
Eilon Greenstein19680c42008-08-13 15:47:33 -07002204 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00002205
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002206 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002207
Eilon Greenstein3c96c682009-01-14 21:25:31 -08002208 bnx2x_calc_fc_adv(bp);
2209
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00002210 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2211 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002212 bnx2x_link_report(bp);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00002213 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002214
Eilon Greenstein19680c42008-08-13 15:47:33 -07002215 return rc;
2216 }
Eilon Greensteinf5372252009-02-12 08:38:30 +00002217 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
Eilon Greenstein19680c42008-08-13 15:47:33 -07002218 return -EINVAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002219}
2220
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002221static void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002222{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002223 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002224 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002225 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002226 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002227
Eilon Greenstein19680c42008-08-13 15:47:33 -07002228 bnx2x_calc_fc_adv(bp);
2229 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00002230 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002231}
2232
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002233static void bnx2x__link_reset(struct bnx2x *bp)
2234{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002235 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002236 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00002237 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002238 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002239 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00002240 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002241}
2242
2243static u8 bnx2x_link_test(struct bnx2x *bp)
2244{
2245 u8 rc;
2246
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002247 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002248 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002249 bnx2x_release_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002250
2251 return rc;
2252}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002253
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002254static void bnx2x_init_port_minmax(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002255{
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002256 u32 r_param = bp->link_vars.line_speed / 8;
2257 u32 fair_periodic_timeout_usec;
2258 u32 t_fair;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002259
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002260 memset(&(bp->cmng.rs_vars), 0,
2261 sizeof(struct rate_shaping_vars_per_port));
2262 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002263
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002264 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2265 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002266
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002267 /* this is the threshold below which no timer arming will occur
2268 1.25 coefficient is for the threshold to be a little bigger
2269 than the real time, to compensate for timer in-accuracy */
2270 bp->cmng.rs_vars.rs_threshold =
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002271 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2272
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002273 /* resolution of fairness timer */
2274 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2275 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2276 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002277
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002278 /* this is the threshold below which we won't arm the timer anymore */
2279 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002280
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002281 /* we multiply by 1e3/8 to get bytes/msec.
2282 We don't want the credits to pass a credit
2283 of the t_fair*FAIR_MEM (algorithm resolution) */
2284 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2285 /* since each tick is 4 usec */
2286 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002287}
2288
Eilon Greenstein2691d512009-08-12 08:22:08 +00002289/* Calculates the sum of vn_min_rates.
2290 It's needed for further normalizing of the min_rates.
2291 Returns:
2292 sum of vn_min_rates.
2293 or
2294 0 - if all the min_rates are 0.
2295 In the later case fainess algorithm should be deactivated.
2296 If not all min_rates are zero then those that are zeroes will be set to 1.
2297 */
2298static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2299{
2300 int all_zero = 1;
2301 int port = BP_PORT(bp);
2302 int vn;
2303
2304 bp->vn_weight_sum = 0;
2305 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2306 int func = 2*vn + port;
2307 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2308 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2309 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2310
2311 /* Skip hidden vns */
2312 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2313 continue;
2314
2315 /* If min rate is zero - set it to 1 */
2316 if (!vn_min_rate)
2317 vn_min_rate = DEF_MIN_RATE;
2318 else
2319 all_zero = 0;
2320
2321 bp->vn_weight_sum += vn_min_rate;
2322 }
2323
2324 /* ... only if all min rates are zeros - disable fairness */
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07002325 if (all_zero) {
2326 bp->cmng.flags.cmng_enables &=
2327 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2328 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2329 " fairness will be disabled\n");
2330 } else
2331 bp->cmng.flags.cmng_enables |=
2332 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
Eilon Greenstein2691d512009-08-12 08:22:08 +00002333}
2334
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002335static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002336{
2337 struct rate_shaping_vars_per_vn m_rs_vn;
2338 struct fairness_vars_per_vn m_fair_vn;
2339 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2340 u16 vn_min_rate, vn_max_rate;
2341 int i;
2342
2343 /* If function is hidden - set min and max to zeroes */
2344 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2345 vn_min_rate = 0;
2346 vn_max_rate = 0;
2347
2348 } else {
2349 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2350 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07002351 /* If min rate is zero - set it to 1 */
2352 if (!vn_min_rate)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002353 vn_min_rate = DEF_MIN_RATE;
2354 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2355 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2356 }
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002357 DP(NETIF_MSG_IFUP,
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07002358 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002359 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002360
2361 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2362 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2363
2364 /* global vn counter - maximal Mbps for this vn */
2365 m_rs_vn.vn_counter.rate = vn_max_rate;
2366
2367 /* quota - number of bytes transmitted in this period */
2368 m_rs_vn.vn_counter.quota =
2369 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2370
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002371 if (bp->vn_weight_sum) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002372 /* credit for each period of the fairness algorithm:
2373 number of bytes in T_FAIR (the vn share the port rate).
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002374 vn_weight_sum should not be larger than 10000, thus
2375 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2376 than zero */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002377 m_fair_vn.vn_credit_delta =
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002378 max((u32)(vn_min_rate * (T_FAIR_COEF /
2379 (8 * bp->vn_weight_sum))),
2380 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002381 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2382 m_fair_vn.vn_credit_delta);
2383 }
2384
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002385 /* Store it to internal memory */
2386 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2387 REG_WR(bp, BAR_XSTRORM_INTMEM +
2388 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2389 ((u32 *)(&m_rs_vn))[i]);
2390
2391 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2392 REG_WR(bp, BAR_XSTRORM_INTMEM +
2393 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2394 ((u32 *)(&m_fair_vn))[i]);
2395}
2396
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002397
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002398/* This function is called upon link interrupt */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002399static void bnx2x_link_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002400{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002401 /* Make sure that we are synced with the current statistics */
2402 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2403
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002404 bnx2x_link_update(&bp->link_params, &bp->link_vars);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002405
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002406 if (bp->link_vars.link_up) {
2407
Eilon Greenstein1c063282009-02-12 08:36:43 +00002408 /* dropless flow control */
Eilon Greensteina18f5122009-08-12 08:23:26 +00002409 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
Eilon Greenstein1c063282009-02-12 08:36:43 +00002410 int port = BP_PORT(bp);
2411 u32 pause_enabled = 0;
2412
2413 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2414 pause_enabled = 1;
2415
2416 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07002417 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
Eilon Greenstein1c063282009-02-12 08:36:43 +00002418 pause_enabled);
2419 }
2420
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002421 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2422 struct host_port_stats *pstats;
2423
2424 pstats = bnx2x_sp(bp, port_stats);
2425 /* reset old bmac stats */
2426 memset(&(pstats->mac_stx[0]), 0,
2427 sizeof(struct mac_stx));
2428 }
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002429 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002430 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2431 }
2432
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002433 /* indicate link status */
2434 bnx2x_link_report(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002435
2436 if (IS_E1HMF(bp)) {
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002437 int port = BP_PORT(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002438 int func;
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002439 int vn;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002440
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002441 /* Set the attention towards other drivers on the same port */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002442 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2443 if (vn == BP_E1HVN(bp))
2444 continue;
2445
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002446 func = ((vn << 1) | port);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002447 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2448 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2449 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002450
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002451 if (bp->link_vars.link_up) {
2452 int i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002453
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002454 /* Init rate shaping and fairness contexts */
2455 bnx2x_init_port_minmax(bp);
2456
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002457 for (vn = VN_0; vn < E1HVN_MAX; vn++)
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002458 bnx2x_init_vn_minmax(bp, 2*vn + port);
2459
2460 /* Store it to internal memory */
2461 for (i = 0;
2462 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2463 REG_WR(bp, BAR_XSTRORM_INTMEM +
2464 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2465 ((u32 *)(&bp->cmng))[i]);
2466 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002467 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002468}
2469
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002470static void bnx2x__link_status_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002471{
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002472 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002473 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002474
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002475 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2476
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002477 if (bp->link_vars.link_up)
2478 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2479 else
2480 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2481
Eilon Greenstein2691d512009-08-12 08:22:08 +00002482 bnx2x_calc_vn_weight_sum(bp);
2483
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002484 /* indicate link status */
2485 bnx2x_link_report(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002486}
2487
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002488static void bnx2x_pmf_update(struct bnx2x *bp)
2489{
2490 int port = BP_PORT(bp);
2491 u32 val;
2492
2493 bp->port.pmf = 1;
2494 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2495
2496 /* enable nig attention */
2497 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2498 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2499 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002500
2501 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002502}
2503
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002504/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002505
2506/* slow path */
2507
2508/*
2509 * General service functions
2510 */
2511
Eilon Greenstein2691d512009-08-12 08:22:08 +00002512/* send the MCP a request, block until there is a reply */
2513u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2514{
2515 int func = BP_FUNC(bp);
2516 u32 seq = ++bp->fw_seq;
2517 u32 rc = 0;
2518 u32 cnt = 1;
2519 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2520
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07002521 mutex_lock(&bp->fw_mb_mutex);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002522 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2523 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2524
2525 do {
2526 /* let the FW do it's magic ... */
2527 msleep(delay);
2528
2529 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2530
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07002531 /* Give the FW up to 5 second (500*10ms) */
2532 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
Eilon Greenstein2691d512009-08-12 08:22:08 +00002533
2534 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2535 cnt*delay, rc, seq);
2536
2537 /* is this a reply to our command? */
2538 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2539 rc &= FW_MSG_CODE_MASK;
2540 else {
2541 /* FW BUG! */
2542 BNX2X_ERR("FW failed to respond!\n");
2543 bnx2x_fw_dump(bp);
2544 rc = 0;
2545 }
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07002546 mutex_unlock(&bp->fw_mb_mutex);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002547
2548 return rc;
2549}
2550
2551static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
Michael Chane665bfd2009-10-10 13:46:54 +00002552static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002553static void bnx2x_set_rx_mode(struct net_device *dev);
2554
2555static void bnx2x_e1h_disable(struct bnx2x *bp)
2556{
2557 int port = BP_PORT(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002558
2559 netif_tx_disable(bp->dev);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002560
2561 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2562
Eilon Greenstein2691d512009-08-12 08:22:08 +00002563 netif_carrier_off(bp->dev);
2564}
2565
2566static void bnx2x_e1h_enable(struct bnx2x *bp)
2567{
2568 int port = BP_PORT(bp);
2569
2570 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2571
Eilon Greenstein2691d512009-08-12 08:22:08 +00002572 /* Tx queue should be only reenabled */
2573 netif_tx_wake_all_queues(bp->dev);
2574
Eilon Greenstein061bc702009-10-15 00:18:47 -07002575 /*
2576 * Should not call netif_carrier_on since it will be called if the link
2577 * is up when checking for link state
2578 */
Eilon Greenstein2691d512009-08-12 08:22:08 +00002579}
2580
2581static void bnx2x_update_min_max(struct bnx2x *bp)
2582{
2583 int port = BP_PORT(bp);
2584 int vn, i;
2585
2586 /* Init rate shaping and fairness contexts */
2587 bnx2x_init_port_minmax(bp);
2588
2589 bnx2x_calc_vn_weight_sum(bp);
2590
2591 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2592 bnx2x_init_vn_minmax(bp, 2*vn + port);
2593
2594 if (bp->port.pmf) {
2595 int func;
2596
2597 /* Set the attention towards other drivers on the same port */
2598 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2599 if (vn == BP_E1HVN(bp))
2600 continue;
2601
2602 func = ((vn << 1) | port);
2603 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2604 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2605 }
2606
2607 /* Store it to internal memory */
2608 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2609 REG_WR(bp, BAR_XSTRORM_INTMEM +
2610 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2611 ((u32 *)(&bp->cmng))[i]);
2612 }
2613}
2614
2615static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2616{
Eilon Greenstein2691d512009-08-12 08:22:08 +00002617 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002618
2619 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2620
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002621 /*
2622 * This is the only place besides the function initialization
2623 * where the bp->flags can change so it is done without any
2624 * locks
2625 */
Eilon Greenstein2691d512009-08-12 08:22:08 +00002626 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2627 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002628 bp->flags |= MF_FUNC_DIS;
Eilon Greenstein2691d512009-08-12 08:22:08 +00002629
2630 bnx2x_e1h_disable(bp);
2631 } else {
2632 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002633 bp->flags &= ~MF_FUNC_DIS;
Eilon Greenstein2691d512009-08-12 08:22:08 +00002634
2635 bnx2x_e1h_enable(bp);
2636 }
2637 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2638 }
2639 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2640
2641 bnx2x_update_min_max(bp);
2642 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2643 }
2644
2645 /* Report results to MCP */
2646 if (dcc_event)
2647 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2648 else
2649 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2650}
2651
Michael Chan28912902009-10-10 13:46:53 +00002652/* must be called under the spq lock */
2653static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2654{
2655 struct eth_spe *next_spe = bp->spq_prod_bd;
2656
2657 if (bp->spq_prod_bd == bp->spq_last_bd) {
2658 bp->spq_prod_bd = bp->spq;
2659 bp->spq_prod_idx = 0;
2660 DP(NETIF_MSG_TIMER, "end of spq\n");
2661 } else {
2662 bp->spq_prod_bd++;
2663 bp->spq_prod_idx++;
2664 }
2665 return next_spe;
2666}
2667
2668/* must be called under the spq lock */
2669static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2670{
2671 int func = BP_FUNC(bp);
2672
2673 /* Make sure that BD data is updated before writing the producer */
2674 wmb();
2675
2676 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2677 bp->spq_prod_idx);
2678 mmiowb();
2679}
2680
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002681/* the slow path queue is odd since completions arrive on the fastpath ring */
2682static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2683 u32 data_hi, u32 data_lo, int common)
2684{
Michael Chan28912902009-10-10 13:46:53 +00002685 struct eth_spe *spe;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002686
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002687 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2688 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002689 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2690 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2691 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2692
2693#ifdef BNX2X_STOP_ON_ERROR
2694 if (unlikely(bp->panic))
2695 return -EIO;
2696#endif
2697
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002698 spin_lock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002699
2700 if (!bp->spq_left) {
2701 BNX2X_ERR("BUG! SPQ ring full!\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002702 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002703 bnx2x_panic();
2704 return -EBUSY;
2705 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08002706
Michael Chan28912902009-10-10 13:46:53 +00002707 spe = bnx2x_sp_get_next(bp);
2708
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002709 /* CID needs port number to be encoded int it */
Michael Chan28912902009-10-10 13:46:53 +00002710 spe->hdr.conn_and_cmd_data =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002711 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2712 HW_CID(bp, cid)));
Michael Chan28912902009-10-10 13:46:53 +00002713 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002714 if (common)
Michael Chan28912902009-10-10 13:46:53 +00002715 spe->hdr.type |=
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002716 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2717
Michael Chan28912902009-10-10 13:46:53 +00002718 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2719 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002720
2721 bp->spq_left--;
2722
Michael Chan28912902009-10-10 13:46:53 +00002723 bnx2x_sp_prod_update(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002724 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002725 return 0;
2726}
2727
2728/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002729static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002730{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002731 u32 i, j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002732 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002733
2734 might_sleep();
2735 i = 100;
2736 for (j = 0; j < i*10; j++) {
2737 val = (1UL << 31);
2738 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2739 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2740 if (val & (1L << 31))
2741 break;
2742
2743 msleep(5);
2744 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002745 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002746 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002747 rc = -EBUSY;
2748 }
2749
2750 return rc;
2751}
2752
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002753/* release split MCP access lock register */
2754static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002755{
2756 u32 val = 0;
2757
2758 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2759}
2760
2761static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2762{
2763 struct host_def_status_block *def_sb = bp->def_status_blk;
2764 u16 rc = 0;
2765
2766 barrier(); /* status block is written to by the chip */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002767 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2768 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2769 rc |= 1;
2770 }
2771 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2772 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2773 rc |= 2;
2774 }
2775 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2776 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2777 rc |= 4;
2778 }
2779 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2780 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2781 rc |= 8;
2782 }
2783 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2784 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2785 rc |= 16;
2786 }
2787 return rc;
2788}
2789
2790/*
2791 * slow path service functions
2792 */
2793
2794static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2795{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002796 int port = BP_PORT(bp);
Eilon Greenstein5c862842008-08-13 15:51:48 -07002797 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2798 COMMAND_REG_ATTN_BITS_SET);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002799 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2800 MISC_REG_AEU_MASK_ATTN_FUNC_0;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002801 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2802 NIG_REG_MASK_INTERRUPT_PORT0;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002803 u32 aeu_mask;
Eilon Greenstein87942b42009-02-12 08:36:49 +00002804 u32 nig_mask = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002805
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002806 if (bp->attn_state & asserted)
2807 BNX2X_ERR("IGU ERROR\n");
2808
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002809 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2810 aeu_mask = REG_RD(bp, aeu_addr);
2811
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002812 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002813 aeu_mask, asserted);
2814 aeu_mask &= ~(asserted & 0xff);
2815 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002816
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002817 REG_WR(bp, aeu_addr, aeu_mask);
2818 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002819
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002820 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002821 bp->attn_state |= asserted;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002822 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002823
2824 if (asserted & ATTN_HARD_WIRED_MASK) {
2825 if (asserted & ATTN_NIG_FOR_FUNC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002826
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002827 bnx2x_acquire_phy_lock(bp);
2828
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002829 /* save nig interrupt mask */
Eilon Greenstein87942b42009-02-12 08:36:49 +00002830 nig_mask = REG_RD(bp, nig_int_mask_addr);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002831 REG_WR(bp, nig_int_mask_addr, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002832
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002833 bnx2x_link_attn(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002834
2835 /* handle unicore attn? */
2836 }
2837 if (asserted & ATTN_SW_TIMER_4_FUNC)
2838 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2839
2840 if (asserted & GPIO_2_FUNC)
2841 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2842
2843 if (asserted & GPIO_3_FUNC)
2844 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2845
2846 if (asserted & GPIO_4_FUNC)
2847 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2848
2849 if (port == 0) {
2850 if (asserted & ATTN_GENERAL_ATTN_1) {
2851 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2852 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2853 }
2854 if (asserted & ATTN_GENERAL_ATTN_2) {
2855 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2856 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2857 }
2858 if (asserted & ATTN_GENERAL_ATTN_3) {
2859 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2860 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2861 }
2862 } else {
2863 if (asserted & ATTN_GENERAL_ATTN_4) {
2864 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2865 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2866 }
2867 if (asserted & ATTN_GENERAL_ATTN_5) {
2868 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2869 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2870 }
2871 if (asserted & ATTN_GENERAL_ATTN_6) {
2872 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2873 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2874 }
2875 }
2876
2877 } /* if hardwired */
2878
Eilon Greenstein5c862842008-08-13 15:51:48 -07002879 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2880 asserted, hc_addr);
2881 REG_WR(bp, hc_addr, asserted);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002882
2883 /* now set back the mask */
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002884 if (asserted & ATTN_NIG_FOR_FUNC) {
Eilon Greenstein87942b42009-02-12 08:36:49 +00002885 REG_WR(bp, nig_int_mask_addr, nig_mask);
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002886 bnx2x_release_phy_lock(bp);
2887 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002888}
2889
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00002890static inline void bnx2x_fan_failure(struct bnx2x *bp)
2891{
2892 int port = BP_PORT(bp);
2893
2894 /* mark the failure */
2895 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2896 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2897 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2898 bp->link_params.ext_phy_config);
2899
2900 /* log the failure */
2901 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2902 " the driver to shutdown the card to prevent permanent"
2903 " damage. Please contact Dell Support for assistance\n",
2904 bp->dev->name);
2905}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002906
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002907static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2908{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002909 int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002910 int reg_offset;
Eilon Greenstein4d295db2009-07-21 05:47:47 +00002911 u32 val, swap_val, swap_override;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002912
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002913 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2914 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002915
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002916 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002917
2918 val = REG_RD(bp, reg_offset);
2919 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2920 REG_WR(bp, reg_offset, val);
2921
2922 BNX2X_ERR("SPIO5 hw attention\n");
2923
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00002924 /* Fan failure attention */
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00002925 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2926 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
Eilon Greenstein17de50b2008-08-13 15:56:59 -07002927 /* Low power mode is controlled by GPIO 2 */
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002928 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
Eilon Greenstein17de50b2008-08-13 15:56:59 -07002929 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00002930 /* The PHY reset is controlled by GPIO 1 */
2931 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2932 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002933 break;
2934
Eilon Greenstein4d295db2009-07-21 05:47:47 +00002935 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2936 /* The PHY reset is controlled by GPIO 1 */
2937 /* fake the port number to cancel the swap done in
2938 set_gpio() */
2939 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2940 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2941 port = (swap_val && swap_override) ^ 1;
2942 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2943 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2944 break;
2945
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002946 default:
2947 break;
2948 }
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00002949 bnx2x_fan_failure(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002950 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002951
Eilon Greenstein589abe32009-02-12 08:36:55 +00002952 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2953 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2954 bnx2x_acquire_phy_lock(bp);
2955 bnx2x_handle_module_detect_int(&bp->link_params);
2956 bnx2x_release_phy_lock(bp);
2957 }
2958
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002959 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2960
2961 val = REG_RD(bp, reg_offset);
2962 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2963 REG_WR(bp, reg_offset, val);
2964
2965 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002966 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002967 bnx2x_panic();
2968 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002969}
2970
/* Handle de-asserted attentions routed through AEU register 1:
 * doorbell queue (DORQ) HW interrupt and fatal set1 HW block attentions.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* NOTE(review): the _STS_CLR register name suggests the read
		   also clears the latched status - confirm with the spec */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the offending fatal bits, then stop the driver */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
3001
/* Handle de-asserted attentions routed through AEU register 2:
 * CFC and PXP HW interrupts and fatal set2 HW block attentions.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the offending fatal bits, then stop the driver */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
3041
/* Handle de-asserted attentions routed through AEU register 3:
 * general attentions (PMF link event, MC/MCP asserts) and latched
 * attentions (GRC timeout / reserved).
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* clear this function's general attention */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* refresh the MF config and poll the driver status
			   mailbox for DCC and PMF-change events */
			bp->mf_config = SHMEM_RD(bp,
					mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			/* become PMF if the MCP says so and we are not yet */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			/* microcode assert - clear all four attentions and
			   stop the driver */
			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			/* management CPU assert - dump its firmware state */
			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* detail register only exists on E1H */
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* clear all latched attention signals */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3096
/* Process all newly de-asserted attention bits: read the after-invert
 * AEU signal registers, run the per-register handlers for each active
 * attention group, clear the bits in the HC and unmask them again in
 * the AEU, then update the driver's attention state.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	/* snapshot the four AEU after-invert signal registers */
	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			/* restrict handling to the signals that belong to
			   this attention group */
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			/* NOTE(review): handlers run in the order 3,1,2,0;
			   preserved as-is - do not reorder casually */
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	/* clear the handled attention bits in the HC */
	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	/* each de-asserted bit should have been tracked as asserted */
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* re-enable the handled attention lines in the AEU mask, under
	   the per-port HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3175
3176static void bnx2x_attn_int(struct bnx2x *bp)
3177{
3178 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003179 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3180 attn_bits);
3181 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3182 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003183 u32 attn_state = bp->attn_state;
3184
3185 /* look for changed bits */
3186 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3187 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3188
3189 DP(NETIF_MSG_HW,
3190 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3191 attn_bits, attn_ack, asserted, deasserted);
3192
3193 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003194 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003195
3196 /* handle bits that were raised */
3197 if (asserted)
3198 bnx2x_attn_int_asserted(bp, asserted);
3199
3200 if (deasserted)
3201 bnx2x_attn_int_deasserted(bp, deasserted);
3202}
3203
/* Slowpath work handler (runs on bnx2x_wq): processes the events the
 * default status block advertises - currently only HW attentions - and
 * then acks all default status block indices; the last (TSTORM) ack
 * re-enables slowpath interrupts.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;


	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	/* presumably a bitmask of changed indices; bit 0 = attentions
	   (see below) - confirm against bnx2x_update_dsb_idx() */
	status = bnx2x_update_dsb_idx(bp);
/* if (status == 0) */
/* BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* ack every storm index; only the final ack enables interrupts */
	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}
3238
/* ISR for the dedicated slowpath MSI-X vector: disables further
 * slowpath interrupts via the status block ack, gives CNIC a chance to
 * handle the event, and defers the real work to bnx2x_sp_task().
 */
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* IGU_INT_DISABLE: no further slowpath interrupts until the work
	   item's final ack (IGU_INT_ENABLE in bnx2x_sp_task) */
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* RCU protects cnic_ops against concurrent unregister */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3272
3273/* end of slow path */
3274
3275/* Statistics */
3276
3277/****************************************************************************
3278* Macros
3279****************************************************************************/
3280
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003281/* sum[hi:lo] += add[hi:lo] */
3282#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3283 do { \
3284 s_lo += a_lo; \
Eilon Greensteinf5ba6772009-01-14 21:29:18 -08003285 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003286 } while (0)
3287
3288/* difference = minuend - subtrahend */
3289#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3290 do { \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003291 if (m_lo < s_lo) { \
3292 /* underflow */ \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003293 d_hi = m_hi - s_hi; \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003294 if (d_hi > 0) { \
Eilon Greenstein6378c022008-08-13 15:59:25 -07003295 /* we can 'loan' 1 */ \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003296 d_hi--; \
3297 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003298 } else { \
Eilon Greenstein6378c022008-08-13 15:59:25 -07003299 /* m_hi <= s_hi */ \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003300 d_hi = 0; \
3301 d_lo = 0; \
3302 } \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003303 } else { \
3304 /* m_lo >= s_lo */ \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003305 if (m_hi < s_hi) { \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003306 d_hi = 0; \
3307 d_lo = 0; \
3308 } else { \
Eilon Greenstein6378c022008-08-13 15:59:25 -07003309 /* m_hi >= s_hi */ \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003310 d_hi = m_hi - s_hi; \
3311 d_lo = m_lo - s_lo; \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003312 } \
3313 } \
3314 } while (0)
3315
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003316#define UPDATE_STAT64(s, t) \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003317 do { \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003318 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3319 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3320 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3321 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3322 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3323 pstats->mac_stx[1].t##_lo, diff.lo); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003324 } while (0)
3325
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003326#define UPDATE_STAT64_NIG(s, t) \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003327 do { \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003328 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3329 diff.lo, new->s##_lo, old->s##_lo); \
3330 ADD_64(estats->t##_hi, diff.hi, \
3331 estats->t##_lo, diff.lo); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003332 } while (0)
3333
3334/* sum[hi:lo] += add */
3335#define ADD_EXTEND_64(s_hi, s_lo, a) \
3336 do { \
3337 s_lo += a; \
3338 s_hi += (s_lo < a) ? 1 : 0; \
3339 } while (0)
3340
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003341#define UPDATE_EXTEND_STAT(s) \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003342 do { \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003343 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3344 pstats->mac_stx[1].s##_lo, \
3345 new->s); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003346 } while (0)
3347
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003348#define UPDATE_EXTEND_TSTAT(s, t) \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003349 do { \
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00003350 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3351 old_tclient->s = tclient->s; \
Eilon Greensteinde832a52009-02-12 08:36:33 +00003352 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3353 } while (0)
3354
3355#define UPDATE_EXTEND_USTAT(s, t) \
3356 do { \
3357 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3358 old_uclient->s = uclient->s; \
3359 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003360 } while (0)
3361
3362#define UPDATE_EXTEND_XSTAT(s, t) \
3363 do { \
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00003364 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3365 old_xclient->s = xclient->s; \
Eilon Greensteinde832a52009-02-12 08:36:33 +00003366 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3367 } while (0)
3368
3369/* minuend -= subtrahend */
3370#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3371 do { \
3372 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3373 } while (0)
3374
3375/* minuend[hi:lo] -= subtrahend */
3376#define SUB_EXTEND_64(m_hi, m_lo, s) \
3377 do { \
3378 SUB_64(m_hi, 0, m_lo, s); \
3379 } while (0)
3380
3381#define SUB_EXTEND_USTAT(s, t) \
3382 do { \
3383 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3384 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003385 } while (0)
3386
3387/*
3388 * General service functions
3389 */
3390
3391static inline long bnx2x_hilo(u32 *hiref)
3392{
3393 u32 lo = *(hiref + 1);
3394#if (BITS_PER_LONG == 64)
3395 u32 hi = *hiref;
3396
3397 return HILO_U64(hi, lo);
3398#else
3399 return lo;
3400#endif
3401}
3402
3403/*
3404 * Init service functions
3405 */
3406
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003407static void bnx2x_storm_stats_post(struct bnx2x *bp)
3408{
3409 if (!bp->stats_pending) {
3410 struct eth_query_ramrod_data ramrod_data = {0};
Eilon Greensteinde832a52009-02-12 08:36:33 +00003411 int i, rc;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003412
3413 ramrod_data.drv_counter = bp->stats_counter++;
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08003414 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
Eilon Greensteinde832a52009-02-12 08:36:33 +00003415 for_each_queue(bp, i)
3416 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003417
3418 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3419 ((u32 *)&ramrod_data)[1],
3420 ((u32 *)&ramrod_data)[0], 0);
3421 if (rc == 0) {
3422 /* stats ramrod has it's own slot on the spq */
3423 bp->spq_left++;
3424 bp->stats_pending = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003425 }
3426 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003427}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003428
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003429static void bnx2x_hw_stats_post(struct bnx2x *bp)
3430{
3431 struct dmae_command *dmae = &bp->stats_dmae;
3432 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3433
3434 *stats_comp = DMAE_COMP_VAL;
Eilon Greensteinde832a52009-02-12 08:36:33 +00003435 if (CHIP_REV_IS_SLOW(bp))
3436 return;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003437
3438 /* loader */
3439 if (bp->executer_idx) {
3440 int loader_idx = PMF_DMAE_C(bp);
3441
3442 memset(dmae, 0, sizeof(struct dmae_command));
3443
3444 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3445 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3446 DMAE_CMD_DST_RESET |
3447#ifdef __BIG_ENDIAN
3448 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3449#else
3450 DMAE_CMD_ENDIANITY_DW_SWAP |
3451#endif
3452 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3453 DMAE_CMD_PORT_0) |
3454 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3455 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3456 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3457 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3458 sizeof(struct dmae_command) *
3459 (loader_idx + 1)) >> 2;
3460 dmae->dst_addr_hi = 0;
3461 dmae->len = sizeof(struct dmae_command) >> 2;
3462 if (CHIP_IS_E1(bp))
3463 dmae->len--;
3464 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3465 dmae->comp_addr_hi = 0;
3466 dmae->comp_val = 1;
3467
3468 *stats_comp = 0;
3469 bnx2x_post_dmae(bp, dmae, loader_idx);
3470
3471 } else if (bp->func_stx) {
3472 *stats_comp = 0;
3473 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3474 }
3475}
3476
3477static int bnx2x_stats_comp(struct bnx2x *bp)
3478{
3479 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3480 int cnt = 10;
3481
3482 might_sleep();
3483 while (*stats_comp != DMAE_COMP_VAL) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003484 if (!cnt) {
3485 BNX2X_ERR("timeout waiting for stats finished\n");
3486 break;
3487 }
3488 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -07003489 msleep(1);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003490 }
3491 return 1;
3492}
3493
3494/*
3495 * Statistics service functions
3496 */
3497
/* Read the accumulated port statistics from the port_stx area in
 * shared memory into the slowpath port_stats buffer using two chained
 * DMAE reads (the area exceeds one DMAE_LEN32_RD_MAX transfer), then
 * wait for completion.  Presumably run when this function takes over
 * as PMF - confirm with callers.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* common GRC -> PCI read opcode; completion target is per-command */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	/* first chunk: DMAE_LEN32_RD_MAX dwords, completes to GRC so the
	   second command is triggered */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second chunk: the remainder, completes to the PCI stats_comp
	   word that bnx2x_stats_comp() polls */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3552
3553static void bnx2x_port_stats_init(struct bnx2x *bp)
3554{
3555 struct dmae_command *dmae;
3556 int port = BP_PORT(bp);
3557 int vn = BP_E1HVN(bp);
3558 u32 opcode;
3559 int loader_idx = PMF_DMAE_C(bp);
3560 u32 mac_addr;
3561 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3562
3563 /* sanity */
3564 if (!bp->link_vars.link_up || !bp->port.pmf) {
3565 BNX2X_ERR("BUG!\n");
3566 return;
3567 }
3568
3569 bp->executer_idx = 0;
3570
3571 /* MCP */
3572 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3573 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3574 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3575#ifdef __BIG_ENDIAN
3576 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3577#else
3578 DMAE_CMD_ENDIANITY_DW_SWAP |
3579#endif
3580 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3581 (vn << DMAE_CMD_E1HVN_SHIFT));
3582
3583 if (bp->port.port_stx) {
3584
3585 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3586 dmae->opcode = opcode;
3587 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3588 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3589 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3590 dmae->dst_addr_hi = 0;
3591 dmae->len = sizeof(struct host_port_stats) >> 2;
3592 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3593 dmae->comp_addr_hi = 0;
3594 dmae->comp_val = 1;
3595 }
3596
3597 if (bp->func_stx) {
3598
3599 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3600 dmae->opcode = opcode;
3601 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3602 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3603 dmae->dst_addr_lo = bp->func_stx >> 2;
3604 dmae->dst_addr_hi = 0;
3605 dmae->len = sizeof(struct host_func_stats) >> 2;
3606 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3607 dmae->comp_addr_hi = 0;
3608 dmae->comp_val = 1;
3609 }
3610
3611 /* MAC */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003612 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3613 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3614 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3615#ifdef __BIG_ENDIAN
3616 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3617#else
3618 DMAE_CMD_ENDIANITY_DW_SWAP |
3619#endif
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003620 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3621 (vn << DMAE_CMD_E1HVN_SHIFT));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003622
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07003623 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003624
3625 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3626 NIG_REG_INGRESS_BMAC0_MEM);
3627
3628 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3629 BIGMAC_REGISTER_TX_STAT_GTBYT */
3630 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3631 dmae->opcode = opcode;
3632 dmae->src_addr_lo = (mac_addr +
3633 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3634 dmae->src_addr_hi = 0;
3635 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3636 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3637 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3638 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3639 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3640 dmae->comp_addr_hi = 0;
3641 dmae->comp_val = 1;
3642
3643 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3644 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3645 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3646 dmae->opcode = opcode;
3647 dmae->src_addr_lo = (mac_addr +
3648 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3649 dmae->src_addr_hi = 0;
3650 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003651 offsetof(struct bmac_stats, rx_stat_gr64_lo));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003652 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003653 offsetof(struct bmac_stats, rx_stat_gr64_lo));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003654 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3655 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3656 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657 dmae->comp_addr_hi = 0;
3658 dmae->comp_val = 1;
3659
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07003660 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003661
3662 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3663
3664 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3665 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3666 dmae->opcode = opcode;
3667 dmae->src_addr_lo = (mac_addr +
3668 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3669 dmae->src_addr_hi = 0;
3670 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3671 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3672 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3673 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3674 dmae->comp_addr_hi = 0;
3675 dmae->comp_val = 1;
3676
3677 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3678 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3679 dmae->opcode = opcode;
3680 dmae->src_addr_lo = (mac_addr +
3681 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3682 dmae->src_addr_hi = 0;
3683 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003684 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003685 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003686 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003687 dmae->len = 1;
3688 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3689 dmae->comp_addr_hi = 0;
3690 dmae->comp_val = 1;
3691
3692 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3693 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3694 dmae->opcode = opcode;
3695 dmae->src_addr_lo = (mac_addr +
3696 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3697 dmae->src_addr_hi = 0;
3698 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003699 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003700 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003701 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003702 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3703 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3704 dmae->comp_addr_hi = 0;
3705 dmae->comp_val = 1;
3706 }
3707
3708 /* NIG */
3709 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003710 dmae->opcode = opcode;
3711 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3712 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3713 dmae->src_addr_hi = 0;
3714 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3715 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3716 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3717 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3718 dmae->comp_addr_hi = 0;
3719 dmae->comp_val = 1;
3720
3721 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3722 dmae->opcode = opcode;
3723 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3724 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3725 dmae->src_addr_hi = 0;
3726 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3727 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3728 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3729 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3730 dmae->len = (2*sizeof(u32)) >> 2;
3731 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3732 dmae->comp_addr_hi = 0;
3733 dmae->comp_val = 1;
3734
3735 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003736 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3737 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3738 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3739#ifdef __BIG_ENDIAN
3740 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3741#else
3742 DMAE_CMD_ENDIANITY_DW_SWAP |
3743#endif
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003744 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3745 (vn << DMAE_CMD_E1HVN_SHIFT));
3746 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3747 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003748 dmae->src_addr_hi = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003749 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3750 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3751 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3752 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3753 dmae->len = (2*sizeof(u32)) >> 2;
3754 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3755 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3756 dmae->comp_val = DMAE_COMP_VAL;
3757
3758 *stats_comp = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003759}
3760
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003761static void bnx2x_func_stats_init(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003762{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003763 struct dmae_command *dmae = &bp->stats_dmae;
3764 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003765
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003766 /* sanity */
3767 if (!bp->func_stx) {
3768 BNX2X_ERR("BUG!\n");
3769 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003770 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003771
3772 bp->executer_idx = 0;
3773 memset(dmae, 0, sizeof(struct dmae_command));
3774
3775 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3776 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3777 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3778#ifdef __BIG_ENDIAN
3779 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3780#else
3781 DMAE_CMD_ENDIANITY_DW_SWAP |
3782#endif
3783 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3784 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3785 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3786 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3787 dmae->dst_addr_lo = bp->func_stx >> 2;
3788 dmae->dst_addr_hi = 0;
3789 dmae->len = sizeof(struct host_func_stats) >> 2;
3790 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3791 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3792 dmae->comp_val = DMAE_COMP_VAL;
3793
3794 *stats_comp = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003795}
3796
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003797static void bnx2x_stats_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003798{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003799 if (bp->port.pmf)
3800 bnx2x_port_stats_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003801
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003802 else if (bp->func_stx)
3803 bnx2x_func_stats_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003804
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003805 bnx2x_hw_stats_post(bp);
3806 bnx2x_storm_stats_post(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003807}
3808
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003809static void bnx2x_stats_pmf_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003810{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003811 bnx2x_stats_comp(bp);
3812 bnx2x_stats_pmf_update(bp);
3813 bnx2x_stats_start(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003814}
3815
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003816static void bnx2x_stats_restart(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003817{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003818 bnx2x_stats_comp(bp);
3819 bnx2x_stats_start(bp);
3820}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003821
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003822static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3823{
3824 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3825 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
Eilon Greensteinde832a52009-02-12 08:36:33 +00003826 struct bnx2x_eth_stats *estats = &bp->eth_stats;
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00003827 struct {
3828 u32 lo;
3829 u32 hi;
3830 } diff;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003831
3832 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3833 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3834 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3835 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3836 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3837 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
Yitchak Gertner66e855f2008-08-13 15:49:05 -07003838 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003839 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
Eilon Greensteinde832a52009-02-12 08:36:33 +00003840 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003841 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3842 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3843 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3844 UPDATE_STAT64(tx_stat_gt127,
3845 tx_stat_etherstatspkts65octetsto127octets);
3846 UPDATE_STAT64(tx_stat_gt255,
3847 tx_stat_etherstatspkts128octetsto255octets);
3848 UPDATE_STAT64(tx_stat_gt511,
3849 tx_stat_etherstatspkts256octetsto511octets);
3850 UPDATE_STAT64(tx_stat_gt1023,
3851 tx_stat_etherstatspkts512octetsto1023octets);
3852 UPDATE_STAT64(tx_stat_gt1518,
3853 tx_stat_etherstatspkts1024octetsto1522octets);
3854 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3855 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3856 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3857 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3858 UPDATE_STAT64(tx_stat_gterr,
3859 tx_stat_dot3statsinternalmactransmiterrors);
3860 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
Eilon Greensteinde832a52009-02-12 08:36:33 +00003861
3862 estats->pause_frames_received_hi =
3863 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3864 estats->pause_frames_received_lo =
3865 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3866
3867 estats->pause_frames_sent_hi =
3868 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3869 estats->pause_frames_sent_lo =
3870 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003871}
3872
/* Fold the EMAC counters read into the slowpath mac_stats buffer into
 * the host port statistics (pstats->mac_stx[1]) and derive the
 * pause-frame totals for eth_stats.
 *
 * NOTE: 'new' and 'pstats' look unused but are referenced from inside
 * the UPDATE_EXTEND_STAT() macro expansion.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	/* extend each 32-bit EMAC HW counter into its 64-bit mac_stx slot */
	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause frames received = XON pause + XOFF pause frames */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause frames sent = XON sent + XOFF sent */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
3929
/* Update the host port statistics from the HW (MAC + NIG) counters that
 * were DMAE'd into the slowpath buffers, then mirror them into
 * eth_stats.  Returns 0 on success, -1 if no active MAC was found
 * (which should be impossible once stats DMAE has run).
 *
 * NOTE: 'diff', 'new', 'old', 'pstats' and 'estats' are referenced from
 * inside the ADD_EXTEND_64()/UPDATE_STAT64_NIG() macro expansions.
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch hi/lo pair consumed by UPDATE_STAT64_NIG() */
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	/* dispatch on the currently active MAC */
	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	/* NIG counters are deltas against the previous snapshot */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	/* remember this snapshot for the next delta computation */
	memcpy(old, new, sizeof(struct nig_stats));

	/* mirror the whole mac_stx[1] block into eth_stats */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	/* matching start/end marks the record as consistent */
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
3979
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003980static int bnx2x_storm_stats_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003981{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003982 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003983 struct tstorm_per_port_stats *tport =
Eilon Greensteinde832a52009-02-12 08:36:33 +00003984 &stats->tstorm_common.port_statistics;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003985 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3986 struct bnx2x_eth_stats *estats = &bp->eth_stats;
Eilon Greensteinde832a52009-02-12 08:36:33 +00003987 int i;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003988
Eilon Greenstein6fe49bb2009-08-12 08:23:17 +00003989 memcpy(&(fstats->total_bytes_received_hi),
3990 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
Eilon Greensteinde832a52009-02-12 08:36:33 +00003991 sizeof(struct host_func_stats) - 2*sizeof(u32));
3992 estats->error_bytes_received_hi = 0;
3993 estats->error_bytes_received_lo = 0;
3994 estats->etherstatsoverrsizepkts_hi = 0;
3995 estats->etherstatsoverrsizepkts_lo = 0;
3996 estats->no_buff_discard_hi = 0;
3997 estats->no_buff_discard_lo = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003998
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00003999 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +00004000 struct bnx2x_fastpath *fp = &bp->fp[i];
4001 int cl_id = fp->cl_id;
4002 struct tstorm_per_client_stats *tclient =
4003 &stats->tstorm_common.client_statistics[cl_id];
4004 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4005 struct ustorm_per_client_stats *uclient =
4006 &stats->ustorm_common.client_statistics[cl_id];
4007 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4008 struct xstorm_per_client_stats *xclient =
4009 &stats->xstorm_common.client_statistics[cl_id];
4010 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4011 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4012 u32 diff;
4013
4014 /* are storm stats valid? */
4015 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4016 bp->stats_counter) {
4017 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4018 " xstorm counter (%d) != stats_counter (%d)\n",
4019 i, xclient->stats_counter, bp->stats_counter);
4020 return -1;
4021 }
4022 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4023 bp->stats_counter) {
4024 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4025 " tstorm counter (%d) != stats_counter (%d)\n",
4026 i, tclient->stats_counter, bp->stats_counter);
4027 return -2;
4028 }
4029 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4030 bp->stats_counter) {
4031 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4032 " ustorm counter (%d) != stats_counter (%d)\n",
4033 i, uclient->stats_counter, bp->stats_counter);
4034 return -4;
4035 }
4036
4037 qstats->total_bytes_received_hi =
Eilon Greensteinca003922009-08-12 22:53:28 -07004038 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004039 qstats->total_bytes_received_lo =
Eilon Greensteinca003922009-08-12 22:53:28 -07004040 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4041
4042 ADD_64(qstats->total_bytes_received_hi,
4043 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4044 qstats->total_bytes_received_lo,
4045 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4046
4047 ADD_64(qstats->total_bytes_received_hi,
4048 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4049 qstats->total_bytes_received_lo,
4050 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4051
4052 qstats->valid_bytes_received_hi =
4053 qstats->total_bytes_received_hi;
Eilon Greensteinde832a52009-02-12 08:36:33 +00004054 qstats->valid_bytes_received_lo =
Eilon Greensteinca003922009-08-12 22:53:28 -07004055 qstats->total_bytes_received_lo;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004056
Eilon Greensteinde832a52009-02-12 08:36:33 +00004057 qstats->error_bytes_received_hi =
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004058 le32_to_cpu(tclient->rcv_error_bytes.hi);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004059 qstats->error_bytes_received_lo =
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004060 le32_to_cpu(tclient->rcv_error_bytes.lo);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004061
4062 ADD_64(qstats->total_bytes_received_hi,
4063 qstats->error_bytes_received_hi,
4064 qstats->total_bytes_received_lo,
4065 qstats->error_bytes_received_lo);
4066
4067 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4068 total_unicast_packets_received);
4069 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4070 total_multicast_packets_received);
4071 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4072 total_broadcast_packets_received);
4073 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4074 etherstatsoverrsizepkts);
4075 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4076
4077 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4078 total_unicast_packets_received);
4079 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4080 total_multicast_packets_received);
4081 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4082 total_broadcast_packets_received);
4083 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4084 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4085 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4086
4087 qstats->total_bytes_transmitted_hi =
Eilon Greensteinca003922009-08-12 22:53:28 -07004088 le32_to_cpu(xclient->unicast_bytes_sent.hi);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004089 qstats->total_bytes_transmitted_lo =
Eilon Greensteinca003922009-08-12 22:53:28 -07004090 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4091
4092 ADD_64(qstats->total_bytes_transmitted_hi,
4093 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4094 qstats->total_bytes_transmitted_lo,
4095 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4096
4097 ADD_64(qstats->total_bytes_transmitted_hi,
4098 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4099 qstats->total_bytes_transmitted_lo,
4100 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
Eilon Greensteinde832a52009-02-12 08:36:33 +00004101
4102 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4103 total_unicast_packets_transmitted);
4104 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4105 total_multicast_packets_transmitted);
4106 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4107 total_broadcast_packets_transmitted);
4108
4109 old_tclient->checksum_discard = tclient->checksum_discard;
4110 old_tclient->ttl0_discard = tclient->ttl0_discard;
4111
4112 ADD_64(fstats->total_bytes_received_hi,
4113 qstats->total_bytes_received_hi,
4114 fstats->total_bytes_received_lo,
4115 qstats->total_bytes_received_lo);
4116 ADD_64(fstats->total_bytes_transmitted_hi,
4117 qstats->total_bytes_transmitted_hi,
4118 fstats->total_bytes_transmitted_lo,
4119 qstats->total_bytes_transmitted_lo);
4120 ADD_64(fstats->total_unicast_packets_received_hi,
4121 qstats->total_unicast_packets_received_hi,
4122 fstats->total_unicast_packets_received_lo,
4123 qstats->total_unicast_packets_received_lo);
4124 ADD_64(fstats->total_multicast_packets_received_hi,
4125 qstats->total_multicast_packets_received_hi,
4126 fstats->total_multicast_packets_received_lo,
4127 qstats->total_multicast_packets_received_lo);
4128 ADD_64(fstats->total_broadcast_packets_received_hi,
4129 qstats->total_broadcast_packets_received_hi,
4130 fstats->total_broadcast_packets_received_lo,
4131 qstats->total_broadcast_packets_received_lo);
4132 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4133 qstats->total_unicast_packets_transmitted_hi,
4134 fstats->total_unicast_packets_transmitted_lo,
4135 qstats->total_unicast_packets_transmitted_lo);
4136 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4137 qstats->total_multicast_packets_transmitted_hi,
4138 fstats->total_multicast_packets_transmitted_lo,
4139 qstats->total_multicast_packets_transmitted_lo);
4140 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4141 qstats->total_broadcast_packets_transmitted_hi,
4142 fstats->total_broadcast_packets_transmitted_lo,
4143 qstats->total_broadcast_packets_transmitted_lo);
4144 ADD_64(fstats->valid_bytes_received_hi,
4145 qstats->valid_bytes_received_hi,
4146 fstats->valid_bytes_received_lo,
4147 qstats->valid_bytes_received_lo);
4148
4149 ADD_64(estats->error_bytes_received_hi,
4150 qstats->error_bytes_received_hi,
4151 estats->error_bytes_received_lo,
4152 qstats->error_bytes_received_lo);
4153 ADD_64(estats->etherstatsoverrsizepkts_hi,
4154 qstats->etherstatsoverrsizepkts_hi,
4155 estats->etherstatsoverrsizepkts_lo,
4156 qstats->etherstatsoverrsizepkts_lo);
4157 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4158 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4159 }
4160
4161 ADD_64(fstats->total_bytes_received_hi,
4162 estats->rx_stat_ifhcinbadoctets_hi,
4163 fstats->total_bytes_received_lo,
4164 estats->rx_stat_ifhcinbadoctets_lo);
4165
4166 memcpy(estats, &(fstats->total_bytes_received_hi),
4167 sizeof(struct host_func_stats) - 2*sizeof(u32));
4168
4169 ADD_64(estats->etherstatsoverrsizepkts_hi,
4170 estats->rx_stat_dot3statsframestoolong_hi,
4171 estats->etherstatsoverrsizepkts_lo,
4172 estats->rx_stat_dot3statsframestoolong_lo);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004173 ADD_64(estats->error_bytes_received_hi,
4174 estats->rx_stat_ifhcinbadoctets_hi,
4175 estats->error_bytes_received_lo,
4176 estats->rx_stat_ifhcinbadoctets_lo);
4177
Eilon Greensteinde832a52009-02-12 08:36:33 +00004178 if (bp->port.pmf) {
4179 estats->mac_filter_discard =
4180 le32_to_cpu(tport->mac_filter_discard);
4181 estats->xxoverflow_discard =
4182 le32_to_cpu(tport->xxoverflow_discard);
4183 estats->brb_truncate_discard =
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004184 le32_to_cpu(tport->brb_truncate_discard);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004185 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4186 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004187
4188 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4189
Eilon Greensteinde832a52009-02-12 08:36:33 +00004190 bp->stats_pending = 0;
4191
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004192 return 0;
4193}
4194
/* Translate the driver's 64-bit eth_stats into the standard
 * net_device_stats exposed to the networking core.  bnx2x_hilo()
 * collapses a hi/lo counter pair into a single value.
 */
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	/* rx drops = MAC discards + per-queue checksum discards */
	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	/* rx_errors is the sum of all the rx error categories above */
	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
4260
4261static void bnx2x_drv_stats_update(struct bnx2x *bp)
4262{
4263 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4264 int i;
4265
4266 estats->driver_xoff = 0;
4267 estats->rx_err_discard_pkt = 0;
4268 estats->rx_skb_alloc_failed = 0;
4269 estats->hw_csum_err = 0;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004270 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +00004271 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4272
4273 estats->driver_xoff += qstats->driver_xoff;
4274 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4275 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4276 estats->hw_csum_err += qstats->hw_csum_err;
4277 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004278}
4279
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004280static void bnx2x_stats_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004281{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004282 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004283
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004284 if (*stats_comp != DMAE_COMP_VAL)
4285 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004286
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004287 if (bp->port.pmf)
Eilon Greensteinde832a52009-02-12 08:36:33 +00004288 bnx2x_hw_stats_update(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004289
Eilon Greensteinde832a52009-02-12 08:36:33 +00004290 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4291 BNX2X_ERR("storm stats were not updated for 3 times\n");
4292 bnx2x_panic();
4293 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004294 }
4295
Eilon Greensteinde832a52009-02-12 08:36:33 +00004296 bnx2x_net_stats_update(bp);
4297 bnx2x_drv_stats_update(bp);
4298
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004299 if (bp->msglevel & NETIF_MSG_TIMER) {
Eilon Greensteinca003922009-08-12 22:53:28 -07004300 struct bnx2x_fastpath *fp0_rx = bp->fp;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004301 struct bnx2x_fastpath *fp0_tx = bp->fp;
Eilon Greensteinde832a52009-02-12 08:36:33 +00004302 struct tstorm_per_client_stats *old_tclient =
4303 &bp->fp->old_tclient;
4304 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004305 struct bnx2x_eth_stats *estats = &bp->eth_stats;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004306 struct net_device_stats *nstats = &bp->dev->stats;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004307 int i;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004308
4309 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4310 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4311 " tx pkt (%lx)\n",
Eilon Greensteinca003922009-08-12 22:53:28 -07004312 bnx2x_tx_avail(fp0_tx),
4313 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004314 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4315 " rx pkt (%lx)\n",
Eilon Greensteinca003922009-08-12 22:53:28 -07004316 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4317 fp0_rx->rx_comp_cons),
4318 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
Eilon Greensteinde832a52009-02-12 08:36:33 +00004319 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4320 "brb truncate %u\n",
4321 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4322 qstats->driver_xoff,
4323 estats->brb_drop_lo, estats->brb_truncate_lo);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004324 printk(KERN_DEBUG "tstats: checksum_discard %u "
Eilon Greensteinde832a52009-02-12 08:36:33 +00004325 "packets_too_big_discard %lu no_buff_discard %lu "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004326 "mac_discard %u mac_filter_discard %u "
4327 "xxovrflow_discard %u brb_truncate_discard %u "
4328 "ttl0_discard %u\n",
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00004329 le32_to_cpu(old_tclient->checksum_discard),
Eilon Greensteinde832a52009-02-12 08:36:33 +00004330 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4331 bnx2x_hilo(&qstats->no_buff_discard_hi),
4332 estats->mac_discard, estats->mac_filter_discard,
4333 estats->xxoverflow_discard, estats->brb_truncate_discard,
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00004334 le32_to_cpu(old_tclient->ttl0_discard));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004335
4336 for_each_queue(bp, i) {
4337 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4338 bnx2x_fp(bp, i, tx_pkt),
4339 bnx2x_fp(bp, i, rx_pkt),
4340 bnx2x_fp(bp, i, rx_calls));
4341 }
4342 }
4343
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004344 bnx2x_hw_stats_post(bp);
4345 bnx2x_storm_stats_post(bp);
4346}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004347
/* Queue the final statistics DMAE transfer(s) before statistics are
 * stopped: copy the host port_stats buffer out to the MCP shared-memory
 * port area and, if this function has its own mailbox, the func_stats
 * buffer as well.  The commands are only built into the slowpath dmae
 * array here; the caller (bnx2x_stats_stop) fires them with
 * bnx2x_hw_stats_post().
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* common opcode bits: host (PCI) -> chip (GRC), with the port and
	 * vnic of this function; completion target is OR-ed in per command
	 * below
	 */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* if a func command follows, chain to it through the GRC
		 * loader; otherwise complete directly to host memory
		 */
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;	/* GRC addr in dwords */
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2; /* dwords */
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			/* clear the completion word we will poll on */
			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		/* per-function stats to the MCP; always the last command,
		 * so it completes to host memory
		 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
4411
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004412static void bnx2x_stats_stop(struct bnx2x *bp)
4413{
4414 int update = 0;
4415
4416 bnx2x_stats_comp(bp);
4417
4418 if (bp->port.pmf)
4419 update = (bnx2x_hw_stats_update(bp) == 0);
4420
4421 update |= (bnx2x_storm_stats_update(bp) == 0);
4422
4423 if (update) {
4424 bnx2x_net_stats_update(bp);
4425
4426 if (bp->port.pmf)
4427 bnx2x_port_stats_stop(bp);
4428
4429 bnx2x_hw_stats_post(bp);
4430 bnx2x_stats_comp(bp);
4431 }
4432}
4433
/* No-op action for statistics state-machine transitions that need no work
 * (see bnx2x_stats_stm below).
 */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
4437
/* Statistics state machine, driven by bnx2x_stats_handle(): indexed by
 * [current state][event], each entry gives the action to execute and the
 * state to enter afterwards.
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
4456
4457static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4458{
4459 enum bnx2x_stats_state state = bp->stats_state;
4460
4461 bnx2x_stats_stm[state][event].action(bp);
4462 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4463
Eilon Greenstein89246652009-08-12 08:23:56 +00004464 /* Make sure the state has been "changed" */
4465 smp_wmb();
4466
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004467 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4468 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4469 state, event, bp->stats_state);
4470}
4471
/* Write the host port_stats buffer out to the MCP shared-memory port
 * statistics area (PCI -> GRC) with a single synchronous DMAE
 * transaction.  PMF-only; requires a valid port_stx address from the MCP.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* single command, completing directly to host memory so we can
	 * poll on stats_comp below
	 */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;	/* GRC addr in dwords */
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;	/* dwords */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* post and wait for completion */
	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4509
4510static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4511{
4512 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4513 int port = BP_PORT(bp);
4514 int func;
4515 u32 func_stx;
4516
4517 /* sanity */
4518 if (!bp->port.pmf || !bp->func_stx) {
4519 BNX2X_ERR("BUG!\n");
4520 return;
4521 }
4522
4523 /* save our func_stx */
4524 func_stx = bp->func_stx;
4525
4526 for (vn = VN_0; vn < vn_max; vn++) {
4527 func = 2*vn + port;
4528
4529 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4530 bnx2x_func_stats_init(bp);
4531 bnx2x_hw_stats_post(bp);
4532 bnx2x_stats_comp(bp);
4533 }
4534
4535 /* restore our func_stx */
4536 bp->func_stx = func_stx;
4537}
4538
/* Read the current function statistics base from MCP shared memory into
 * the host func_stats_base buffer (GRC -> PCI) with a synchronous DMAE
 * transaction.  Called from bnx2x_stats_init() for non-PMF functions
 * that still have a statistics mailbox.
 */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* GRC -> PCI, completing directly to host memory */
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;	/* GRC addr in dwords */
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;	/* dwords */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* post and wait for completion */
	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4576
/* One-time statistics setup: fetch the MCP statistics mailbox addresses,
 * snapshot the NIG baseline counters, zero all per-queue and aggregate
 * software statistics, and initialize the MCP statistics areas (role
 * depends on whether this function is the PMF).
 */
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		/* no MCP - no shared-memory statistics areas */
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats: record the current NIG counters as the baseline
	 * against which deltas will be computed
	 */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats: clear the per-queue storm snapshots and counters */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		/* the PMF initializes the shared-memory statistics areas */
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		/* non-PMF: just read back our function statistics base */
		bnx2x_func_stats_base_update(bp);
}
4638
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004639static void bnx2x_timer(unsigned long data)
4640{
4641 struct bnx2x *bp = (struct bnx2x *) data;
4642
4643 if (!netif_running(bp->dev))
4644 return;
4645
4646 if (atomic_read(&bp->intr_sem) != 0)
Eliezer Tamirf1410642008-02-28 11:51:50 -08004647 goto timer_restart;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004648
4649 if (poll) {
4650 struct bnx2x_fastpath *fp = &bp->fp[0];
4651 int rc;
4652
Eilon Greenstein7961f792009-03-02 07:59:31 +00004653 bnx2x_tx_int(fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004654 rc = bnx2x_rx_int(fp, 1000);
4655 }
4656
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004657 if (!BP_NOMCP(bp)) {
4658 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004659 u32 drv_pulse;
4660 u32 mcp_pulse;
4661
4662 ++bp->fw_drv_pulse_wr_seq;
4663 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4664 /* TBD - add SYSTEM_TIME */
4665 drv_pulse = bp->fw_drv_pulse_wr_seq;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004666 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004667
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004668 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004669 MCP_PULSE_SEQ_MASK);
4670 /* The delta between driver pulse and mcp response
4671 * should be 1 (before mcp response) or 0 (after mcp response)
4672 */
4673 if ((drv_pulse != mcp_pulse) &&
4674 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4675 /* someone lost a heartbeat... */
4676 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4677 drv_pulse, mcp_pulse);
4678 }
4679 }
4680
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07004681 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004682 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004683
Eliezer Tamirf1410642008-02-28 11:51:50 -08004684timer_restart:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004685 mod_timer(&bp->timer, jiffies + bp->current_interval);
4686}
4687
4688/* end of Statistics */
4689
4690/* nic init */
4691
4692/*
4693 * nic init service functions
4694 */
4695
/* Clear the USTORM and CSTORM portions of a fastpath status block in
 * CSEM fast memory for the given sb_id on this port.
 */
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}
4708
/* Program a fastpath status block: write its host DMA address and owning
 * function into internal memory for both the USTORM and CSTORM sections,
 * write 1 to every HC_DISABLE index (presumably leaving coalescing off
 * until bnx2x_update_coalesce() configures it - confirm), and enable
 * interrupts for the block via the IGU ack.
 */
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	/* host address (low/high dwords) and owning function */
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	/* enable interrupts for this status block */
	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4753
/* Clear the default status block of this function in each storm's fast
 * memory (TSTORM, both CSTORM halves, XSTORM).
 */
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
4771
/* Program the default (slowpath) status block: set up the attention
 * section (AEU group masks, attention message address and number) and
 * then, for each storm (USTORM/CSTORM/TSTORM/XSTORM), write the section's
 * host DMA address and owning function and write 1 to every HC_DISABLE
 * index.  Finally enable the block's interrupt via the IGU ack.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	/* cache the AEU enable masks of all dynamic attention groups */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	/* attention message address (low/high dwords) for this port */
	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	/* OR the status block id into the attention number register */
	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	/* enable interrupts for the default status block */
	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4887
4888static void bnx2x_update_coalesce(struct bnx2x *bp)
4889{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004890 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004891 int i;
4892
4893 for_each_queue(bp, i) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004894 int sb_id = bp->fp[i].sb_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004895
4896 /* HC_INDEX_U_ETH_RX_CQ_CONS */
Eilon Greensteinca003922009-08-12 22:53:28 -07004897 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4898 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4899 U_SB_ETH_RX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00004900 bp->rx_ticks/(4 * BNX2X_BTR));
Eilon Greensteinca003922009-08-12 22:53:28 -07004901 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4902 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4903 U_SB_ETH_RX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00004904 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004905
4906 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4907 REG_WR8(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07004908 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4909 C_SB_ETH_TX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00004910 bp->tx_ticks/(4 * BNX2X_BTR));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004911 REG_WR16(bp, BAR_CSTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07004912 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4913 C_SB_ETH_TX_CQ_INDEX),
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00004914 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004915 }
4916}
4917
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004918static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4919 struct bnx2x_fastpath *fp, int last)
4920{
4921 int i;
4922
4923 for (i = 0; i < last; i++) {
4924 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4925 struct sk_buff *skb = rx_buf->skb;
4926
4927 if (skb == NULL) {
4928 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4929 continue;
4930 }
4931
4932 if (fp->tpa_state[i] == BNX2X_TPA_START)
4933 pci_unmap_single(bp->pdev,
4934 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein356e2382009-02-12 08:38:32 +00004935 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004936
4937 dev_kfree_skb(skb);
4938 rx_buf->skb = NULL;
4939 }
4940}
4941
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004942static void bnx2x_init_rx_rings(struct bnx2x *bp)
4943{
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004944 int func = BP_FUNC(bp);
Eilon Greenstein32626232008-08-13 15:51:07 -07004945 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4946 ETH_MAX_AGGREGATION_QUEUES_E1H;
4947 u16 ring_prod, cqe_ring_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004948 int i, j;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004949
Eilon Greenstein87942b42009-02-12 08:36:49 +00004950 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
Eilon Greenstein0f008462009-02-12 08:36:18 +00004951 DP(NETIF_MSG_IFUP,
4952 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004953
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004954 if (bp->flags & TPA_ENABLE_FLAG) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004955
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004956 for_each_queue(bp, j) {
Eilon Greenstein32626232008-08-13 15:51:07 -07004957 struct bnx2x_fastpath *fp = &bp->fp[j];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004958
Eilon Greenstein32626232008-08-13 15:51:07 -07004959 for (i = 0; i < max_agg_queues; i++) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004960 fp->tpa_pool[i].skb =
4961 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4962 if (!fp->tpa_pool[i].skb) {
4963 BNX2X_ERR("Failed to allocate TPA "
4964 "skb pool for queue[%d] - "
4965 "disabling TPA on this "
4966 "queue!\n", j);
4967 bnx2x_free_tpa_pool(bp, fp, i);
4968 fp->disable_tpa = 1;
4969 break;
4970 }
4971 pci_unmap_addr_set((struct sw_rx_bd *)
4972 &bp->fp->tpa_pool[i],
4973 mapping, 0);
4974 fp->tpa_state[i] = BNX2X_TPA_STOP;
4975 }
4976 }
4977 }
4978
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004979 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004980 struct bnx2x_fastpath *fp = &bp->fp[j];
4981
4982 fp->rx_bd_cons = 0;
4983 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004984 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004985
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004986 /* "next page" elements initialization */
4987 /* SGE ring */
4988 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4989 struct eth_rx_sge *sge;
4990
4991 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4992 sge->addr_hi =
4993 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4994 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4995 sge->addr_lo =
4996 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4997 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4998 }
4999
5000 bnx2x_init_sge_ring_bit_mask(fp);
5001
5002 /* RX BD ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005003 for (i = 1; i <= NUM_RX_RINGS; i++) {
5004 struct eth_rx_bd *rx_bd;
5005
5006 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5007 rx_bd->addr_hi =
5008 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005009 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005010 rx_bd->addr_lo =
5011 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005012 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005013 }
5014
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005015 /* CQ ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005016 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5017 struct eth_rx_cqe_next_page *nextpg;
5018
5019 nextpg = (struct eth_rx_cqe_next_page *)
5020 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5021 nextpg->addr_hi =
5022 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005023 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005024 nextpg->addr_lo =
5025 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005026 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005027 }
5028
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005029 /* Allocate SGEs and initialize the ring elements */
5030 for (i = 0, ring_prod = 0;
5031 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005032
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005033 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5034 BNX2X_ERR("was only able to allocate "
5035 "%d rx sges\n", i);
5036 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5037 /* Cleanup already allocated elements */
5038 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
Eilon Greenstein32626232008-08-13 15:51:07 -07005039 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005040 fp->disable_tpa = 1;
5041 ring_prod = 0;
5042 break;
5043 }
5044 ring_prod = NEXT_SGE_IDX(ring_prod);
5045 }
5046 fp->rx_sge_prod = ring_prod;
5047
5048 /* Allocate BDs and initialize BD ring */
Yitchak Gertner66e855f2008-08-13 15:49:05 -07005049 fp->rx_comp_cons = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005050 cqe_ring_prod = ring_prod = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005051 for (i = 0; i < bp->rx_ring_size; i++) {
5052 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5053 BNX2X_ERR("was only able to allocate "
Eilon Greensteinde832a52009-02-12 08:36:33 +00005054 "%d rx skbs on queue[%d]\n", i, j);
5055 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005056 break;
5057 }
5058 ring_prod = NEXT_RX_IDX(ring_prod);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005059 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
Ilpo Järvinen53e5e962008-07-25 21:40:45 -07005060 WARN_ON(ring_prod <= i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005061 }
5062
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005063 fp->rx_bd_prod = ring_prod;
5064 /* must not have more available CQEs than BDs */
5065 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5066 cqe_ring_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005067 fp->rx_pkt = fp->rx_calls = 0;
5068
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005069 /* Warning!
5070 * this will generate an interrupt (to the TSTORM)
5071 * must only be done after chip is initialized
5072 */
5073 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5074 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005075 if (j != 0)
5076 continue;
5077
5078 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005079 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005080 U64_LO(fp->rx_comp_mapping));
5081 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005082 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005083 U64_HI(fp->rx_comp_mapping));
5084 }
5085}
5086
5087static void bnx2x_init_tx_ring(struct bnx2x *bp)
5088{
5089 int i, j;
5090
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005091 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005092 struct bnx2x_fastpath *fp = &bp->fp[j];
5093
5094 for (i = 1; i <= NUM_TX_RINGS; i++) {
Eilon Greensteinca003922009-08-12 22:53:28 -07005095 struct eth_tx_next_bd *tx_next_bd =
5096 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005097
Eilon Greensteinca003922009-08-12 22:53:28 -07005098 tx_next_bd->addr_hi =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005099 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005100 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eilon Greensteinca003922009-08-12 22:53:28 -07005101 tx_next_bd->addr_lo =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005102 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005103 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005104 }
5105
Eilon Greensteinca003922009-08-12 22:53:28 -07005106 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5107 fp->tx_db.data.zero_fill1 = 0;
5108 fp->tx_db.data.prod = 0;
5109
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005110 fp->tx_pkt_prod = 0;
5111 fp->tx_pkt_cons = 0;
5112 fp->tx_bd_prod = 0;
5113 fp->tx_bd_cons = 0;
5114 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5115 fp->tx_pkt = 0;
5116 }
5117}
5118
/* Initialize the slow-path queue (SPQ): reset the driver-side producer
 * state and program the SPQ page base address and initial producer index
 * into the XSTORM fast memory for this function.
 */
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	/* All SPQ entries are available; producer starts at the ring base */
	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	/* Tell the firmware where the SPQ page lives (low then high 32 bits) */
	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	/* Publish the initial producer index */
	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
5140
/* Fill in the per-connection Ethernet context for every queue: the Rx
 * (USTORM) side first — status block/client ids, BD page addresses and
 * optional TPA/SGE parameters — then the Tx (CSTORM/XSTORM) side with
 * the Tx BD page addresses and statistics configuration.
 */
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			/* TPA enabled: also describe the SGE ring */
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			/* sge_buff_size is a u16 field; clamp to 0xffff */
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			/* SGE pages needed per MTU-sized packet, rounded up
			   to a PAGES_PER_SGE multiple */
			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}
5216
/* Program the RSS indirection table in TSTORM memory: each table entry
 * maps to a client id, spread round-robin over the active Rx queues.
 * A no-op when RSS is disabled.
 */
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}
5232
/* Write the per-client Tstorm configuration (MTU, statistics and VLAN/
 * E1HOV removal flags) for every queue's client id.  The two 32-bit
 * halves of the config struct are written to storm internal memory.
 */
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	/* NOTE: "STATSITICS" is the (misspelled) name from the FW interface */
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	/* Offload VLAN tag stripping only when a vlan group is registered
	   and HW VLAN Rx is enabled */
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		/* Write the struct as two consecutive 32-bit words */
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
5265
/* Translate the driver's rx_mode into the Tstorm MAC filter configuration
 * and the NIG LLH drive mask, then write both to the hardware.  Finally
 * push the per-client config unless Rx is fully disabled.
 */
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		/* Unknown mode: leave the filter config zeroed (drop nothing
		   explicitly) and log the error */
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	/* Select the LLH mask register of this port (0 or 1) */
	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	/* Write the filter config struct word by word into Tstorm memory */
	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
5328
/* Chip-common internal memory init: zero the Ustorm aggregation data
 * area word by word.
 */
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}
5339
/* Per-port internal memory init: program the BNX2X_BTR timeout value
 * into the C/T/X/U storm HC_BTR locations for this port.
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
5351
/* Per-function internal memory init: program the common Tstorm function
 * config (RSS/TPA/E1HOV), reset the storm Rx mode and per-client stats,
 * set up statistics collection addresses, CQ page mappings and max
 * aggregation size per queue, dropless flow control thresholds (E1H),
 * and the rate shaping / fairness (cmng) context.
 */
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	/* RSS configuration only when more than one queue is used */
	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	/* Zero the per-client statistics areas in all three storms */
	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	/* Publish the stats flags (two 32-bit words) to each storm */
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	/* Tell each storm where the FW statistics buffer is (lo/hi DMA) */
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	/* E1H-only settings: function mode (MF or not) and outer VLAN tag */
	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		/* Threshold values in ring entries; cos 1 is used for pause */
		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			/* SGE thresholds only matter when TPA is active */
			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}


			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}


	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
5566
Eilon Greenstein471de712008-08-13 15:49:35 -07005567static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5568{
5569 switch (load_code) {
5570 case FW_MSG_CODE_DRV_LOAD_COMMON:
5571 bnx2x_init_internal_common(bp);
5572 /* no break */
5573
5574 case FW_MSG_CODE_DRV_LOAD_PORT:
5575 bnx2x_init_internal_port(bp);
5576 /* no break */
5577
5578 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5579 bnx2x_init_internal_func(bp);
5580 break;
5581
5582 default:
5583 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5584 break;
5585 }
5586}
5587
/* Top-level NIC initialization: set up per-queue fastpath state and
 * status blocks, then initialize all rings, contexts and internal
 * memory, and finally enable interrupts.  Ordering matters: interrupts
 * are enabled only after everything else is ready.
 */
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		/* sb 0 is reserved for the CNIC when it is compiled in */
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
5642
5643/* end of nic init */
5644
5645/*
5646 * gzip service functions
5647 */
5648
/* Allocate the resources used by the firmware decompressor: a DMA-
 * consistent output buffer, a zlib stream object and its workspace.
 * Uses goto-based unwinding so partial allocations are released on
 * failure.  Returns 0 on success or -ENOMEM.
 */
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}
5681
/* Release the decompressor resources allocated by bnx2x_gunzip_init().
 * NOTE(review): assumes bp->strm is non-NULL here (i.e. init succeeded);
 * only the DMA buffer pointer is checked before freeing.
 */
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}
5695
/* Decompress a gzip-wrapped firmware blob of 'len' bytes at 'zbuf' into
 * bp->gunzip_buf using the kernel zlib in raw-deflate mode.  Skips the
 * 10-byte gzip header (plus an optional embedded file name), stores the
 * output length in 32-bit words in bp->gunzip_outlen, and returns 0 on
 * success, -EINVAL on a bad header, or the zlib error code otherwise.
 */
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	/* fixed gzip header is 10 bytes */
	n = 10;

#define FNAME				0x8

	/* FNAME flag set: skip the NUL-terminated original file name */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* negative window bits: raw deflate, no zlib wrapper */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	/* output length must be a whole number of 32-bit words */
	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
5741
5742/* nic load/unload */
5743
5744/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005745 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005746 */
5747
5748/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	/* each DMAE write is a 2-dword payload plus a control dword
	   (wb_write[2]) carrying the SOP/EOP flags */
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
5765
5766/* some of the internal memories
5767 * are not directly readable from the driver
5768 * to test them we send debug packets
5769 */
/* Self-test of internal memories that are not directly readable by the
 * driver: loopback debug packets are pushed through the parser/BRB/NIG
 * path and the packet counters are checked at each stage.  Returns 0 on
 * success or a negative stage-specific code (-1..-4) identifying which
 * check timed out/failed.  Each polling loop is scaled by 'factor' to
 * accommodate the much slower FPGA/emulation platforms.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
5917
/* Unmask (write 0 to) the per-block interrupt masks so the HW blocks can
 * raise attentions.  The SEM and MISC masks are deliberately left alone
 * (commented out below); PXP2 gets a chip-revision-specific mask and PBF
 * keeps bits 3-4 masked.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
5956
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005957
/* Assert the "common" (chip-wide, not per-port) reset by writing the
 * block-select masks to the two MISC reset CLEAR registers.  The exact
 * bit meanings come from the MISC reset register layout - NOTE(review):
 * masks taken as-is, verify against the register spec if changing. */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
5965
Eilon Greenstein573f2032009-08-12 08:24:14 +00005966static void bnx2x_init_pxp(struct bnx2x *bp)
5967{
5968 u16 devctl;
5969 int r_order, w_order;
5970
5971 pci_read_config_word(bp->pdev,
5972 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5973 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5974 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5975 if (bp->mrrs == -1)
5976 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5977 else {
5978 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5979 r_order = bp->mrrs;
5980 }
5981
5982 bnx2x_init_pxp_arb(bp, r_order, w_order);
5983}
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00005984
5985static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5986{
5987 u32 val;
5988 u8 port;
5989 u8 is_required = 0;
5990
5991 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5992 SHARED_HW_CFG_FAN_FAILURE_MASK;
5993
5994 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5995 is_required = 1;
5996
5997 /*
5998 * The fan failure mechanism is usually related to the PHY type since
5999 * the power consumption of the board is affected by the PHY. Currently,
6000 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6001 */
6002 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6003 for (port = PORT_0; port < PORT_MAX; port++) {
6004 u32 phy_type =
6005 SHMEM_RD(bp, dev_info.port_hw_config[port].
6006 external_phy_config) &
6007 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6008 is_required |=
6009 ((phy_type ==
6010 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6011 (phy_type ==
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006012 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6013 (phy_type ==
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00006014 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6015 }
6016
6017 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6018
6019 if (is_required == 0)
6020 return;
6021
6022 /* Fan failure is indicated by SPIO 5 */
6023 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6024 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6025
6026 /* set to active low mode */
6027 val = REG_RD(bp, MISC_REG_SPIO_INT);
6028 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6029 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6030 REG_WR(bp, MISC_REG_SPIO_INT, val);
6031
6032 /* enable interrupt to signal the IGU */
6033 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6034 val |= (1 << MISC_REGISTERS_SPIO_5);
6035 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6036}
6037
/* First-stage HW init, run once per chip (not per port/function): takes
 * the chip out of common reset, initializes every block shared between
 * the ports (PXP, DMAE, CMs, SDMs, SEMs, QM, DQ, BRB, PRS, SRC, CDU,
 * CFC, HC, NIG, ...), runs the internal memory self test on a cold E1
 * boot, and brings up the common PHY.  Returns 0 on success or -EBUSY
 * when one of the HW init handshakes (PXP/CFC polls, self test) fails.
 */
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	/* pulse the LCPLL control register */
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do it's magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	/* zero the QM page table entries for all 64 connections (and the
	   E1H extension table) */
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	/* zero the storm internal memories before loading the SEMs */
	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	/* fill the RSS key registers with a placeholder pattern */
	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	/* these external PHYs need the HW lock to be taken around MDIO
	   accesses */
	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006310
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006311static int bnx2x_init_port(struct bnx2x *bp)
6312{
6313 int port = BP_PORT(bp);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006314 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
Eilon Greenstein1c063282009-02-12 08:36:43 +00006315 u32 low, high;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006316 u32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006317
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006318 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6319
6320 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006321
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006322 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006323 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07006324
6325 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6326 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6327 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006328 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006329
Michael Chan37b091b2009-10-10 13:46:55 +00006330#ifdef BCM_CNIC
6331 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006332
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006333 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
Michael Chan37b091b2009-10-10 13:46:55 +00006334 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6335 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006336#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006337 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00006338
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006339 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00006340 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6341 /* no pause for emulation and FPGA */
6342 low = 0;
6343 high = 513;
6344 } else {
6345 if (IS_E1HMF(bp))
6346 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6347 else if (bp->dev->mtu > 4096) {
6348 if (bp->flags & ONE_PORT_FLAG)
6349 low = 160;
6350 else {
6351 val = bp->dev->mtu;
6352 /* (24*1024 + val*4)/256 */
6353 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6354 }
6355 } else
6356 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6357 high = low + 56; /* 14*1024/256 */
6358 }
6359 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6360 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6361
6362
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006363 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07006364
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006365 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006366 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006367 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006368 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006369
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006370 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6371 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6372 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6373 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006374
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006375 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006376 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006377
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006378 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006379
6380 /* configure PBF to work without PAUSE mtu 9000 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006381 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006382
6383 /* update threshold */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006384 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006385 /* update init credit */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006386 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006387
6388 /* probe changes */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006389 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006390 msleep(5);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006391 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006392
Michael Chan37b091b2009-10-10 13:46:55 +00006393#ifdef BCM_CNIC
6394 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006395#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006396 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006397 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006398
6399 if (CHIP_IS_E1(bp)) {
6400 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6401 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6402 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006403 bnx2x_init_block(bp, HC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006404
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006405 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006406 /* init aeu_mask_attn_func_0/1:
6407 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6408 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6409 * bits 4-7 are used for "per vn group attention" */
6410 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6411 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6412
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006413 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006414 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006415 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006416 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006417 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006418
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006419 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006420
6421 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6422
6423 if (CHIP_IS_E1H(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006424 /* 0x2 disable e1hov, 0x1 enable */
6425 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6426 (IS_E1HMF(bp) ? 0x1 : 0x2));
6427
Eilon Greenstein1c063282009-02-12 08:36:43 +00006428 {
6429 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6430 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6431 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6432 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006433 }
6434
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006435 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006436 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006437
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006438 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
Eilon Greenstein589abe32009-02-12 08:36:55 +00006439 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6440 {
6441 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6442
6443 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6444 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6445
6446 /* The GPIO should be swapped if the swap register is
6447 set and active */
6448 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6449 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6450
6451 /* Select function upon port-swap configuration */
6452 if (port == 0) {
6453 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6454 aeu_gpio_mask = (swap_val && swap_override) ?
6455 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6456 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6457 } else {
6458 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6459 aeu_gpio_mask = (swap_val && swap_override) ?
6460 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6461 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6462 }
6463 val = REG_RD(bp, offset);
6464 /* add GPIO3 to group */
6465 val |= aeu_gpio_mask;
6466 REG_WR(bp, offset, val);
6467 }
6468 break;
6469
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006470 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006471 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eliezer Tamirf1410642008-02-28 11:51:50 -08006472 /* add SPIO 5 to group 0 */
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006473 {
6474 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6475 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6476 val = REG_RD(bp, reg_addr);
Eliezer Tamirf1410642008-02-28 11:51:50 -08006477 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006478 REG_WR(bp, reg_addr, val);
6479 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08006480 break;
6481
6482 default:
6483 break;
6484 }
6485
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006486 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006487
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006488 return 0;
6489}
6490
/* Each PCI function owns half of the 768 ILT (page-translation table) lines */
#define ILT_PER_FUNC	(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
/* Encode an ILT line range as (last << 10) | first; PXP_ONE_ILT is the
   degenerate single-line case (first == last) */
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

/* Extra ILT lines reserved for the CNIC (iSCSI/offload) driver; zero when
   CNIC support is compiled out */
#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006509
6510static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6511{
6512 int reg;
6513
6514 if (CHIP_IS_E1H(bp))
6515 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6516 else /* E1 */
6517 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6518
6519 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6520}
6521
/*
 * bnx2x_init_func - per-PCI-function stage of hardware initialization.
 *
 * Programs the ILT lines owned by this function (context, and - with CNIC -
 * timers/QM/searcher tables), runs the FUNC0 init stage for the HW blocks,
 * and clears latched PCIE error bits.  Always returns 0.
 */
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	/* first ILT line owned by this function */
	i = FUNC_ILT_BASE(func);

	/* map the slowpath context; CDU range covers the CNIC lines too */
	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	/* ILT lines for the offload engine: timers, QM queues, searcher T1 */
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	/* last free entry = end of the 16K T2 table minus one 64-byte slot */
	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		/* E1H runs the per-function init stage for all HW blocks */
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
6618
/*
 * bnx2x_init_hw - top-level hardware init dispatcher.
 *
 * Runs the common/port/function init stages according to the load_code
 * returned by the MCP management firmware.  The switch falls through on
 * purpose: COMMON implies PORT and FUNCTION, PORT implies FUNCTION.
 * Returns 0 on success or a negative error code.
 */
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	/* DMAE is not usable until the relevant init stage enables it */
	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		/* seed the driver-pulse sequence from the firmware mailbox */
		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	/* NOTE(review): relies on i holding one-past-the-last queue index
	   after for_each_queue, so this zeroes the CNIC status block */
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
6680
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006681static void bnx2x_free_mem(struct bnx2x *bp)
6682{
6683
6684#define BNX2X_PCI_FREE(x, y, size) \
6685 do { \
6686 if (x) { \
6687 pci_free_consistent(bp->pdev, size, x, y); \
6688 x = NULL; \
6689 y = 0; \
6690 } \
6691 } while (0)
6692
6693#define BNX2X_FREE(x) \
6694 do { \
6695 if (x) { \
6696 vfree(x); \
6697 x = NULL; \
6698 } \
6699 } while (0)
6700
6701 int i;
6702
6703 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006704 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006705 for_each_queue(bp, i) {
6706
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006707 /* status blocks */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006708 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6709 bnx2x_fp(bp, i, status_blk_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006710 sizeof(struct host_status_block));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006711 }
6712 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006713 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006714
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006715 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006716 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6717 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6718 bnx2x_fp(bp, i, rx_desc_mapping),
6719 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6720
6721 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6722 bnx2x_fp(bp, i, rx_comp_mapping),
6723 sizeof(struct eth_fast_path_rx_cqe) *
6724 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006725
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006726 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07006727 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006728 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6729 bnx2x_fp(bp, i, rx_sge_mapping),
6730 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6731 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006732 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006733 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006734
6735 /* fastpath tx rings: tx_buf tx_desc */
6736 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6737 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6738 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006739 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006740 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006741 /* end of fastpath */
6742
6743 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006744 sizeof(struct host_def_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006745
6746 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006747 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006748
Michael Chan37b091b2009-10-10 13:46:55 +00006749#ifdef BCM_CNIC
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006750 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6751 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6752 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6753 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
Michael Chan37b091b2009-10-10 13:46:55 +00006754 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6755 sizeof(struct host_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006756#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006757 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006758
6759#undef BNX2X_PCI_FREE
6760#undef BNX2X_KFREE
6761}
6762
/*
 * bnx2x_alloc_mem - allocate all driver memory: per-queue status blocks,
 * Rx/Tx/SGE rings, default status block, slowpath area, CNIC tables
 * (when built in) and the slowpath ring.
 *
 * Both helper macros jump to alloc_mem_err on failure, where everything
 * allocated so far is released via bnx2x_free_mem (its free macros are
 * NULL-safe).  Returns 0 on success or -ENOMEM.
 */
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

/* DMA-coherent allocation, zeroed; bails out to alloc_mem_err on failure */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* Plain virtual allocation, zeroed; bails out to alloc_mem_err on failure */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	  (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	/* chain the 64-byte entries: each entry's last u64 points at the
	   DMA address of the next entry */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6868
6869static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6870{
6871 int i;
6872
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006873 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006874 struct bnx2x_fastpath *fp = &bp->fp[i];
6875
6876 u16 bd_cons = fp->tx_bd_cons;
6877 u16 sw_prod = fp->tx_pkt_prod;
6878 u16 sw_cons = fp->tx_pkt_cons;
6879
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006880 while (sw_cons != sw_prod) {
6881 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6882 sw_cons++;
6883 }
6884 }
6885}
6886
6887static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6888{
6889 int i, j;
6890
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006891 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006892 struct bnx2x_fastpath *fp = &bp->fp[j];
6893
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006894 for (i = 0; i < NUM_RX_BD; i++) {
6895 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6896 struct sk_buff *skb = rx_buf->skb;
6897
6898 if (skb == NULL)
6899 continue;
6900
6901 pci_unmap_single(bp->pdev,
6902 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein356e2382009-02-12 08:38:32 +00006903 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006904
6905 rx_buf->skb = NULL;
6906 dev_kfree_skb(skb);
6907 }
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006908 if (!fp->disable_tpa)
Eilon Greenstein32626232008-08-13 15:51:07 -07006909 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6910 ETH_MAX_AGGREGATION_QUEUES_E1 :
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006911 ETH_MAX_AGGREGATION_QUEUES_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006912 }
6913}
6914
/* Free all driver-owned socket buffers, Tx side first, then Rx. */
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
6920
6921static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6922{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006923 int i, offset = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006924
6925 free_irq(bp->msix_table[0].vector, bp->dev);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006926 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006927 bp->msix_table[0].vector);
6928
Michael Chan37b091b2009-10-10 13:46:55 +00006929#ifdef BCM_CNIC
6930 offset++;
6931#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006932 for_each_queue(bp, i) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006933 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006934 "state %x\n", i, bp->msix_table[i + offset].vector,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006935 bnx2x_fp(bp, i, state));
6936
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006937 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006938 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006939}
6940
6941static void bnx2x_free_irq(struct bnx2x *bp)
6942{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006943 if (bp->flags & USING_MSIX_FLAG) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006944 bnx2x_free_msix_irqs(bp);
6945 pci_disable_msix(bp->pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006946 bp->flags &= ~USING_MSIX_FLAG;
6947
Eilon Greenstein8badd272009-02-12 08:36:15 +00006948 } else if (bp->flags & USING_MSI_FLAG) {
6949 free_irq(bp->pdev->irq, bp->dev);
6950 pci_disable_msi(bp->pdev);
6951 bp->flags &= ~USING_MSI_FLAG;
6952
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006953 } else
6954 free_irq(bp->pdev->irq, bp->dev);
6955}
6956
6957static int bnx2x_enable_msix(struct bnx2x *bp)
6958{
Eilon Greenstein8badd272009-02-12 08:36:15 +00006959 int i, rc, offset = 1;
6960 int igu_vec = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006961
Eilon Greenstein8badd272009-02-12 08:36:15 +00006962 bp->msix_table[0].entry = igu_vec;
6963 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006964
Michael Chan37b091b2009-10-10 13:46:55 +00006965#ifdef BCM_CNIC
6966 igu_vec = BP_L_ID(bp) + offset;
6967 bp->msix_table[1].entry = igu_vec;
6968 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6969 offset++;
6970#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006971 for_each_queue(bp, i) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00006972 igu_vec = BP_L_ID(bp) + offset + i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006973 bp->msix_table[i + offset].entry = igu_vec;
6974 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6975 "(fastpath #%u)\n", i + offset, igu_vec, i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006976 }
6977
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006978 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006979 BNX2X_NUM_QUEUES(bp) + offset);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006980 if (rc) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00006981 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6982 return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006983 }
Eilon Greenstein8badd272009-02-12 08:36:15 +00006984
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006985 bp->flags |= USING_MSIX_FLAG;
6986
6987 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006988}
6989
/*
 * Request all MSI-X vectors enabled by bnx2x_enable_msix: the slowpath
 * vector first, then one vector per fastpath queue.  If any fastpath
 * request fails, every vector granted so far is released.  Returns 0 on
 * success or -EBUSY.
 */
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	/* slowpath interrupt - entry 0 of the MSI-X table */
	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	/* entry 1 belongs to CNIC - fastpath vectors start after it */
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		/* per-queue IRQ name, e.g. "eth0-fp-0" */
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			/* unwind: release sp irq and any fp irqs granted */
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->dev->name, bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
7029
Eilon Greenstein8badd272009-02-12 08:36:15 +00007030static int bnx2x_enable_msi(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007031{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007032 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007033
Eilon Greenstein8badd272009-02-12 08:36:15 +00007034 rc = pci_enable_msi(bp->pdev);
7035 if (rc) {
7036 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7037 return -1;
7038 }
7039 bp->flags |= USING_MSI_FLAG;
7040
7041 return 0;
7042}
7043
7044static int bnx2x_req_irq(struct bnx2x *bp)
7045{
7046 unsigned long flags;
7047 int rc;
7048
7049 if (bp->flags & USING_MSI_FLAG)
7050 flags = 0;
7051 else
7052 flags = IRQF_SHARED;
7053
7054 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007055 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007056 if (!rc)
7057 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7058
7059 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007060}
7061
Yitchak Gertner65abd742008-08-25 15:26:24 -07007062static void bnx2x_napi_enable(struct bnx2x *bp)
7063{
7064 int i;
7065
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007066 for_each_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007067 napi_enable(&bnx2x_fp(bp, i, napi));
7068}
7069
7070static void bnx2x_napi_disable(struct bnx2x *bp)
7071{
7072 int i;
7073
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007074 for_each_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007075 napi_disable(&bnx2x_fp(bp, i, napi));
7076}
7077
/*
 * Re-enable interrupt/NAPI/Tx processing after a suspension.
 * bp->intr_sem counts outstanding disables; only the call that drops it
 * to zero (atomic_dec_and_test returns true) actually re-arms NAPI,
 * interrupts and the Tx queues.
 */
static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			/* wake Tx only once the device is fully OPEN */
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}
7094
/*
 * Quiesce the interface: synchronously disable interrupts (also at the
 * HW level when disable_hw is set), stop NAPI polling, then stop the
 * Tx queues.  NOTE(review): the ordering (interrupts -> NAPI -> Tx)
 * looks deliberate so no new work is scheduled during teardown.
 */
static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}
7101
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007102/*
7103 * Init service functions
7104 */
7105
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 *
 * Fills the slowpath mac_config table and posts a SET_MAC ramrod to the
 * firmware; completion is asynchronous.
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	/* one entry for the MAC itself, plus one for broadcast if asked */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	/* the CAM stores each 16-bit MAC word byte-swapped (swab16) —
	   presumably the firmware expects them in that order */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		/* clearing: mark the CAM entry invalid instead */
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
							cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	/* hand the table to firmware via the SET_MAC ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
7177
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	/* E1H uses a different command layout over the same slowpath buffer */
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;		/* single CAM entry, no broadcast */
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC - 16 bits at a time, each half byte-swapped (swab16) */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		/* clearing: flag the entry for invalidation */
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	/* post the SET_MAC ramrod with the DMA address of the command */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
7225
/* Waits (or, with poll != 0, actively polls the Rx rings) until *state_p
 * becomes the requested state, as updated by the ramrod completion path
 * (bnx2x_sp_event()).  Returns 0 on success, -EIO if the driver paniced
 * meanwhile, -EBUSY on timeout (~5 seconds). */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			/* interrupts may be disabled - drive the Rx ring
			 * manually so the completion can be consumed */
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		/* bail out early if the chip is in a panic state */
		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
7270
Michael Chane665bfd2009-10-10 13:46:54 +00007271static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7272{
7273 bp->set_mac_pending++;
7274 smp_wmb();
7275
7276 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7277 (1 << bp->fp->cl_id), BP_FUNC(bp));
7278
7279 /* Wait for a completion */
7280 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7281}
7282
7283static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7284{
7285 bp->set_mac_pending++;
7286 smp_wmb();
7287
7288 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7289 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7290 1);
7291
7292 /* Wait for a completion */
7293 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7294}
7295
#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	/* mark the ramrod as pending before posting it */
	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		/* entry right after the two ETH entries of this port */
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
					  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	/* NOTE(review): despite the @return contract above, the result of
	 * bnx2x_wait_ramrod() is discarded and 0 is always returned -
	 * confirm callers don't rely on a timeout being reported */
	return 0;
}
#endif
7333
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007334static int bnx2x_setup_leading(struct bnx2x *bp)
7335{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007336 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007337
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007338 /* reset IGU state */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007339 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007340
7341 /* SETUP ramrod */
7342 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7343
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007344 /* Wait for completion */
7345 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007346
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007347 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007348}
7349
7350static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7351{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007352 struct bnx2x_fastpath *fp = &bp->fp[index];
7353
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007354 /* reset IGU state */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007355 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007356
Eliezer Tamir228241e2008-02-28 11:56:57 -08007357 /* SETUP ramrod */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007358 fp->state = BNX2X_FP_STATE_OPENING;
7359 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7360 fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007361
7362 /* Wait for completion */
7363 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007364 &(fp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007365}
7366
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007367static int bnx2x_poll(struct napi_struct *napi, int budget);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007368
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007369static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007370{
Eilon Greensteinca003922009-08-12 22:53:28 -07007371
7372 switch (bp->multi_mode) {
7373 case ETH_RSS_MODE_DISABLED:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007374 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007375 break;
7376
7377 case ETH_RSS_MODE_REGULAR:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007378 if (num_queues)
7379 bp->num_queues = min_t(u32, num_queues,
7380 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07007381 else
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007382 bp->num_queues = min_t(u32, num_online_cpus(),
7383 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07007384 break;
7385
7386
7387 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007388 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007389 break;
7390 }
Eilon Greensteinca003922009-08-12 22:53:28 -07007391}
7392
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007393static int bnx2x_set_num_queues(struct bnx2x *bp)
Eilon Greensteinca003922009-08-12 22:53:28 -07007394{
7395 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007396
Eilon Greenstein8badd272009-02-12 08:36:15 +00007397 switch (int_mode) {
7398 case INT_MODE_INTx:
7399 case INT_MODE_MSI:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007400 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007401 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
Eilon Greenstein8badd272009-02-12 08:36:15 +00007402 break;
7403
7404 case INT_MODE_MSIX:
7405 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007406 /* Set number of queues according to bp->multi_mode value */
7407 bnx2x_set_num_queues_msix(bp);
Eilon Greensteinca003922009-08-12 22:53:28 -07007408
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007409 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7410 bp->num_queues);
Eilon Greensteinca003922009-08-12 22:53:28 -07007411
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007412 /* if we can't use MSI-X we only need one fp,
7413 * so try to enable MSI-X with the requested number of fp's
7414 * and fallback to MSI or legacy INTx with one fp
7415 */
Eilon Greensteinca003922009-08-12 22:53:28 -07007416 rc = bnx2x_enable_msix(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007417 if (rc)
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007418 /* failed to enable MSI-X */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007419 bp->num_queues = 1;
Eilon Greenstein8badd272009-02-12 08:36:15 +00007420 break;
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007421 }
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007422 bp->dev->real_num_tx_queues = bp->num_queues;
Eilon Greensteinca003922009-08-12 22:53:28 -07007423 return rc;
Eilon Greenstein8badd272009-02-12 08:36:15 +00007424}
7425
Michael Chan993ac7b2009-10-10 13:46:56 +00007426#ifdef BCM_CNIC
7427static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7428static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7429#endif
Eilon Greenstein8badd272009-02-12 08:36:15 +00007430
/* Bring the NIC up: allocate resources, request IRQs, negotiate the load
 * type with the MCP (or emulate it when there is no MCP), init HW/FW,
 * open the queues, program the MACs and start the fast path.
 * load_mode selects the start behaviour (LOAD_NORMAL/LOAD_OPEN/LOAD_DIAG).
 * must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* rc is examined below only on the non-MSI-X path */
	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	/* request interrupts: MSI-X when it was enabled above, otherwise
	 * MSI (when possible) or legacy INTx */
	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_num_queues()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		/* no MCP: emulate the load-type decision with a driver-side
		 * reference count per chip/port */
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	/* first function to load on a port becomes the port-management
	 * function (PMF) */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* advertise DCC support to the MCP via shmem2, common init only */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		/* open all remaining (non-default) queues */
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif

	return 0;

	/* error unwind: each label releases everything acquired after the
	 * corresponding failure point */
#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
7674
7675static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7676{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007677 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007678 int rc;
7679
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007680 /* halt the connection */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007681 fp->state = BNX2X_FP_STATE_HALTING;
7682 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007683
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007684 /* Wait for completion */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007685 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007686 &(fp->state), 1);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007687 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007688 return rc;
7689
7690 /* delete cfc entry */
7691 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7692
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007693 /* Wait for completion */
7694 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007695 &(fp->state), 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007696 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007697}
7698
/* Tear down the leading (default) queue: halt it, then post PORT_DELETE
 * and wait for the completion to show up on the default status block. */
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	/* snapshot the producer so we can detect the PORT_DEL completion */
	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
7748
/* Reset the per-function HW state: mask the IGU edges, (with CNIC) stop
 * the timers scan, and clear this function's ILT entries. */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
7777
/* Reset the per-port HW state: mask NIG interrupts, block Rx traffic to
 * the BRB, clear the AEU mask and verify the BRB drained. */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* give in-flight packets time to drain before checking occupancy */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
7803
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007804static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7805{
7806 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7807 BP_FUNC(bp), reset_code);
7808
7809 switch (reset_code) {
7810 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7811 bnx2x_reset_port(bp);
7812 bnx2x_reset_func(bp);
7813 bnx2x_reset_common(bp);
7814 break;
7815
7816 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7817 bnx2x_reset_port(bp);
7818 bnx2x_reset_func(bp);
7819 break;
7820
7821 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7822 bnx2x_reset_func(bp);
7823 break;
7824
7825 default:
7826 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7827 break;
7828 }
7829}
7830
Eilon Greenstein33471622008-08-13 15:59:08 -07007831/* must be called with rtnl_lock */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007832static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007833{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007834 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007835 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007836 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007837
Michael Chan993ac7b2009-10-10 13:46:56 +00007838#ifdef BCM_CNIC
7839 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7840#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007841 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7842
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007843 /* Set "drop all" */
Eliezer Tamir228241e2008-02-28 11:56:57 -08007844 bp->rx_mode = BNX2X_RX_MODE_NONE;
7845 bnx2x_set_storm_rx_mode(bp);
7846
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007847 /* Disable HW interrupts, NAPI and Tx */
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007848 bnx2x_netif_stop(bp, 1);
Eilon Greensteine94d8af2009-01-22 03:37:36 +00007849
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007850 del_timer_sync(&bp->timer);
7851 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7852 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07007853 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007854
Eilon Greenstein70b99862009-01-14 06:43:48 +00007855 /* Release IRQs */
7856 bnx2x_free_irq(bp);
7857
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007858 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007859 for_each_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007860 struct bnx2x_fastpath *fp = &bp->fp[i];
7861
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007862 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08007863 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007864
Eilon Greenstein7961f792009-03-02 07:59:31 +00007865 bnx2x_tx_int(fp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007866 if (!cnt) {
7867 BNX2X_ERR("timeout waiting for queue[%d]\n",
7868 i);
7869#ifdef BNX2X_STOP_ON_ERROR
7870 bnx2x_panic();
7871 return -EBUSY;
7872#else
7873 break;
7874#endif
7875 }
7876 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007877 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007878 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08007879 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007880 /* Give HW time to discard old tx messages */
7881 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007882
Yitchak Gertner65abd742008-08-25 15:26:24 -07007883 if (CHIP_IS_E1(bp)) {
7884 struct mac_configuration_cmd *config =
7885 bnx2x_sp(bp, mcast_config);
7886
Michael Chane665bfd2009-10-10 13:46:54 +00007887 bnx2x_set_eth_mac_addr_e1(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007888
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08007889 for (i = 0; i < config->hdr.length; i++)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007890 CAM_INVALIDATE(config->config_table[i]);
7891
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08007892 config->hdr.length = i;
Yitchak Gertner65abd742008-08-25 15:26:24 -07007893 if (CHIP_REV_IS_SLOW(bp))
7894 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7895 else
7896 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
Eilon Greenstein0626b892009-02-12 08:38:14 +00007897 config->hdr.client_id = bp->fp->cl_id;
Yitchak Gertner65abd742008-08-25 15:26:24 -07007898 config->hdr.reserved1 = 0;
7899
Michael Chane665bfd2009-10-10 13:46:54 +00007900 bp->set_mac_pending++;
7901 smp_wmb();
7902
Yitchak Gertner65abd742008-08-25 15:26:24 -07007903 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7904 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7905 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7906
7907 } else { /* E1H */
7908 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7909
Michael Chane665bfd2009-10-10 13:46:54 +00007910 bnx2x_set_eth_mac_addr_e1h(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007911
7912 for (i = 0; i < MC_HASH_SIZE; i++)
7913 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007914
7915 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007916 }
Michael Chan993ac7b2009-10-10 13:46:56 +00007917#ifdef BCM_CNIC
7918 /* Clear iSCSI L2 MAC */
7919 mutex_lock(&bp->cnic_mutex);
7920 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7921 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7922 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7923 }
7924 mutex_unlock(&bp->cnic_mutex);
7925#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07007926
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007927 if (unload_mode == UNLOAD_NORMAL)
7928 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007929
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007930 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007931 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007932
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007933 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007934 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007935 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007936 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007937 /* The mac address is written to entries 1-4 to
7938 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007939 u8 entry = (BP_E1HVN(bp) + 1)*8;
7940
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007941 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007942 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007943
7944 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7945 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007946 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007947
7948 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007949
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007950 } else
7951 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7952
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007953 /* Close multi and leading connections
7954 Completions for ramrods are collected in a synchronous way */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007955 for_each_nondefault_queue(bp, i)
7956 if (bnx2x_stop_multi(bp, i))
Eliezer Tamir228241e2008-02-28 11:56:57 -08007957 goto unload_error;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007958
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007959 rc = bnx2x_stop_leading(bp);
7960 if (rc) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007961 BNX2X_ERR("Stop leading failed!\n");
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007962#ifdef BNX2X_STOP_ON_ERROR
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007963 return -EBUSY;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007964#else
7965 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007966#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08007967 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007968
Eliezer Tamir228241e2008-02-28 11:56:57 -08007969unload_error:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007970 if (!BP_NOMCP(bp))
Eliezer Tamir228241e2008-02-28 11:56:57 -08007971 reset_code = bnx2x_fw_command(bp, reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007972 else {
Eilon Greensteinf5372252009-02-12 08:38:30 +00007973 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007974 load_count[0], load_count[1], load_count[2]);
7975 load_count[0]--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007976 load_count[1 + port]--;
Eilon Greensteinf5372252009-02-12 08:38:30 +00007977 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007978 load_count[0], load_count[1], load_count[2]);
7979 if (load_count[0] == 0)
7980 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007981 else if (load_count[1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007982 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7983 else
7984 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7985 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007986
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007987 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7988 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7989 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007990
7991 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08007992 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007993
7994 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007995 if (!BP_NOMCP(bp))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007996 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
Eilon Greenstein356e2382009-02-12 08:38:32 +00007997
Eilon Greenstein9a035442008-11-03 16:45:55 -08007998 bp->port.pmf = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007999
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07008000 /* Free SKBs, SGEs, TPA pool and driver internals */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008001 bnx2x_free_skbs(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00008002 for_each_queue(bp, i)
Eilon Greenstein3196a882008-08-13 15:58:49 -07008003 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00008004 for_each_queue(bp, i)
Eilon Greenstein7cde1c82009-01-22 06:01:25 +00008005 netif_napi_del(&bnx2x_fp(bp, i, napi));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008006 bnx2x_free_mem(bp);
8007
8008 bp->state = BNX2X_STATE_CLOSED;
Eliezer Tamir228241e2008-02-28 11:56:57 -08008009
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008010 netif_carrier_off(bp->dev);
8011
8012 return 0;
8013}
8014
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008015static void bnx2x_reset_task(struct work_struct *work)
8016{
8017 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8018
8019#ifdef BNX2X_STOP_ON_ERROR
8020 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8021 " so reset not done to allow debug dump,\n"
Joe Perchesad361c92009-07-06 13:05:40 -07008022 " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008023 return;
8024#endif
8025
8026 rtnl_lock();
8027
8028 if (!netif_running(bp->dev))
8029 goto reset_task_exit;
8030
8031 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8032 bnx2x_nic_load(bp, LOAD_NORMAL);
8033
8034reset_task_exit:
8035 rtnl_unlock();
8036}
8037
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008038/* end of nic load/unload */
8039
8040/* ethtool_ops */
8041
8042/*
8043 * Init service functions
8044 */
8045
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00008046static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8047{
8048 switch (func) {
8049 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8050 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8051 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8052 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8053 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8054 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8055 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8056 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8057 default:
8058 BNX2X_ERR("Unsupported function index: %d\n", func);
8059 return (u32)(-1);
8060 }
8061}
8062
/* Disable interrupts on an E1H chip left behind by a pre-boot (UNDI)
 * driver.
 *
 * The sequence temporarily makes this PCI function "pretend" to be
 * function 0 through the PGL pretend register, disables interrupts in
 * that mode, then restores the original function.  Each pretend write
 * is read back to flush the GRC transaction and verified; a mismatch
 * indicates broken hardware access and is fatal (BUG).  The mmiowb()
 * barriers around the pretend switches are required ordering — do not
 * remove or move them.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
8095
/* Disable chip interrupts during UNDI takeover.  E1 can be disabled
 * directly; E1H needs the function-pretend dance first.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp))
		bnx2x_int_disable(bp);
	else
		bnx2x_undi_int_disable_e1h(bp, func);
}
8103
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008104static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008105{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008106 u32 val;
8107
8108 /* Check if there is any driver already loaded */
8109 val = REG_RD(bp, MISC_REG_UNPREPARED);
8110 if (val == 0x1) {
8111 /* Check if it is the UNDI driver
8112 * UNDI driver initializes CID offset for normal bell to 0x7
8113 */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07008114 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008115 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8116 if (val == 0x7) {
8117 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008118 /* save our func */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008119 int func = BP_FUNC(bp);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008120 u32 swap_en;
8121 u32 swap_val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008122
Eilon Greensteinb4661732009-01-14 06:43:56 +00008123 /* clear the UNDI indication */
8124 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8125
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008126 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8127
8128 /* try unload UNDI on port 0 */
8129 bp->func = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008130 bp->fw_seq =
8131 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8132 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008133 reset_code = bnx2x_fw_command(bp, reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008134
8135 /* if UNDI is loaded on the other port */
8136 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8137
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008138 /* send "DONE" for previous unload */
8139 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8140
8141 /* unload UNDI on port 1 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008142 bp->func = 1;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008143 bp->fw_seq =
8144 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8145 DRV_MSG_SEQ_NUMBER_MASK);
8146 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008147
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008148 bnx2x_fw_command(bp, reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008149 }
8150
Eilon Greensteinb4661732009-01-14 06:43:56 +00008151 /* now it's safe to release the lock */
8152 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8153
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00008154 bnx2x_undi_int_disable(bp, func);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008155
8156 /* close input traffic and wait for it */
8157 /* Do not rcv packets to BRB */
8158 REG_WR(bp,
8159 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8160 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8161 /* Do not direct rcv packets that are not for MCP to
8162 * the BRB */
8163 REG_WR(bp,
8164 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8165 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8166 /* clear AEU */
8167 REG_WR(bp,
8168 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8169 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8170 msleep(10);
8171
8172 /* save NIG port swap info */
8173 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8174 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008175 /* reset device */
8176 REG_WR(bp,
8177 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008178 0xd3ffffff);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008179 REG_WR(bp,
8180 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8181 0x1403);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008182 /* take the NIG out of reset and restore swap values */
8183 REG_WR(bp,
8184 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8185 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8186 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8187 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8188
8189 /* send unload done to the MCP */
8190 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8191
8192 /* restore our func and fw_seq */
8193 bp->func = func;
8194 bp->fw_seq =
8195 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8196 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greensteinb4661732009-01-14 06:43:56 +00008197
8198 } else
8199 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008200 }
8201}
8202
/* Probe the chip-common (port-independent) hardware information at
 * device init: chip id, port count, flash size, MCP shared-memory
 * bases, bootcode version, feature flags and WoL capability.
 *
 * If the shared-memory base is missing or out of the expected window,
 * the MCP is considered inactive: NO_MCP_FLAG is set and the rest of
 * the shmem-derived probing is skipped (early return).
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* Single-port detection: chip-id bit 0 or the 0x2874 strap
	 * pattern (E1 vs E1H read the strap differently) */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	/* shmem base outside [0xA0000, 0xC0000) means no running MCP */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		/* warn only — probing continues with suspect shmem */
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		/* WoL depends on PME-from-D3cold support in PCI PM caps */
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	/* Part number lives in four consecutive 32-bit shmem words */
	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
8301
/* Build bp->port.supported (ethtool SUPPORTED_* mask) from the NVRAM
 * switch configuration and the external PHY type, then mask the result
 * by the NVRAM speed-capability mask and latch the PHY MDIO address.
 *
 * @switch_cfg: SWITCH_CFG_1G (SerDes path) or SWITCH_CFG_10G (XGXS
 *              path); anything else is an NVRAM configuration error
 *              and aborts with bp->port.supported untouched.
 *
 * The per-PHY capability sets below are fixed tables keyed by the
 * ext_phy type from NVRAM — do not edit individual flags without the
 * PHY datasheet at hand.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* SerDes PHY address comes from the NIG strap register */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			/* note: no SUPPORTED_Autoneg for 8705/8706 */
			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			/* PHY marked failed in NVRAM: report but keep
			 * going with no capabilities added */
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* XGXS PHY address comes from the NIG strap register */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
8538
/* Translate the NVRAM link_config word into the requested link settings
 * (req_line_speed, req_duplex, req_flow_ctrl) and the advertised mode mask,
 * validating each requested speed against the port's supported mask.
 * On an invalid NVRAM combination an error is logged and the function
 * returns early, leaving previously initialized values untouched.
 */
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	/* default to full duplex; half-duplex cases override below */
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			/* autoneg supported: advertise everything we can do */
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			/* BCM8705/8706 cannot autoneg (10G-only external
			 * PHYs, advertised as FIBRE below) — TODO confirm
			 * against PHY datasheets */
			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	/* all 10G media variants map to the same requested speed */
	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		/* unknown speed encoding: log it and fall back to autoneg */
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	/* flow control comes from the same NVRAM word; AUTO requires
	 * autoneg capability, otherwise disable flow control entirely */
	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
8701
Michael Chane665bfd2009-10-10 13:46:54 +00008702static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8703{
8704 mac_hi = cpu_to_be16(mac_hi);
8705 mac_lo = cpu_to_be32(mac_lo);
8706 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8707 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8708}
8709
/* Read the per-port hardware configuration from shared memory (SHMEM):
 * lane/PHY configuration, speed capabilities, link configuration, XGXS
 * lane equalization values, WoL default, MDIO PHY address and the port
 * MAC address(es).  Populates bp->link_params, bp->port, bp->wol,
 * bp->mdio.prtad and the net_device MAC address.
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		/* normalize the NOC variant to plain BCM8727 and remember
		 * the distinction in a feature flag instead */
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx
	 * (each 32-bit SHMEM word packs two 16-bit lane values) */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	/* primary MAC comes from SHMEM; mirror it into link_params and
	 * the permanent address */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	/* separate MAC for iSCSI offload when CNIC support is built in */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008801
/* Gather chip-wide and function-specific hardware configuration.
 * Detects E1H multi-function (MF) mode and the function's outer VLAN tag
 * (e1hov), reads per-port info when an MCP is present, and resolves the
 * function's MAC address (from the MF config in MF mode, or a random MAC
 * on MCP-less emulation/FPGA setups).
 *
 * Returns 0 on success or -EPERM on an inconsistent MF configuration.
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		/* function 0 having a valid e1hov tag indicates the chip
		 * is in multi-function mode */
		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			/* in MF mode this function must have its own valid
			 * outer-VLAN tag */
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			/* single-function mode allows only VN 0 */
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! VN %d in single function mode,"
					  " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		/* resume the firmware mailbox sequence where the MCP is */
		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		/* in MF mode the per-function MAC overrides the port MAC,
		 * but only if both halves are programmed */
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
8883
/* One-time initialization of the driver-private state during probe:
 * locks, work items, hardware info readout, module-parameter-derived
 * settings (multi-queue mode, TPA/LRO, dropless flow control, MRRS),
 * ring sizes, interrupt coalescing ticks and the periodic timer.
 *
 * Returns the status of bnx2x_get_hwinfo() (0 or a negative errno).
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	/* NOTE(review): rc is only returned at the end; initialization
	 * continues even if bnx2x_get_hwinfo() failed — confirm this is
	 * intentional */
	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		/* RSS requires MSI-X; fall back to a single queue */
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	/* dropless flow control is not available on E1 chips */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	/* slow (emulation/FPGA) chips get a 5x longer timer period;
	 * the 'poll' module parameter overrides either */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
8961
8962/*
8963 * ethtool service functions
8964 */
8965
8966/* All ethtool functions called with rtnl_lock */
8967
/* ethtool get_settings handler (called under rtnl_lock).
 * Fills *cmd with the current link state: supported/advertised modes,
 * speed and duplex (capped by the per-VN max bandwidth in E1H MF mode,
 * -1 when the link is down), the port type derived from the external
 * PHY, the MDIO PHY address and the autoneg state.  Always returns 0.
 */
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if ((bp->state == BNX2X_STATE_OPEN) &&
	    !(bp->flags & MF_FUNC_DIS) &&
	    (bp->link_vars.link_up)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
		if (IS_E1HMF(bp)) {
			/* in MF mode the function's bandwidth is capped;
			 * report the lower of link speed and the cap */
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < cmd->speed)
				cmd->speed = vn_max_rate;
		}
	} else {
		/* link down: ethtool convention for "unknown" */
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		/* map the external PHY type to an ethtool port type */
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
9048
/* ethtool set_settings handler (called under rtnl_lock).
 * Validates the requested autoneg/speed/duplex against the port's
 * supported mask, updates the requested link parameters and advertised
 * modes, and re-runs link setup if the interface is up.
 *
 * A no-op (returning 0) in E1H multi-function mode, where link settings
 * are shared among functions.  Returns -EINVAL for unsupported requests.
 */
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		/* 1G and above support full duplex only */
		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	/* apply the new settings immediately if the interface is up */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
9199
/* true if a register-dump table entry is valid for the given chip revision
 * (E1 vs E1H); used to size and fill the ethtool register dump below */
#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9202
9203static int bnx2x_get_regs_len(struct net_device *dev)
9204{
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00009205 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein0d28e492009-08-12 08:23:40 +00009206 int regdump_len = 0;
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00009207 int i;
9208
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00009209 if (CHIP_IS_E1(bp)) {
9210 for (i = 0; i < REGS_COUNT; i++)
9211 if (IS_E1_ONLINE(reg_addrs[i].info))
9212 regdump_len += reg_addrs[i].size;
9213
9214 for (i = 0; i < WREGS_COUNT_E1; i++)
9215 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9216 regdump_len += wreg_addrs_e1[i].size *
9217 (1 + wreg_addrs_e1[i].read_regs_count);
9218
9219 } else { /* E1H */
9220 for (i = 0; i < REGS_COUNT; i++)
9221 if (IS_E1H_ONLINE(reg_addrs[i].info))
9222 regdump_len += reg_addrs[i].size;
9223
9224 for (i = 0; i < WREGS_COUNT_E1H; i++)
9225 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9226 regdump_len += wreg_addrs_e1h[i].size *
9227 (1 + wreg_addrs_e1h[i].read_regs_count);
9228 }
9229 regdump_len *= 4;
9230 regdump_len += sizeof(struct dump_hdr);
9231
9232 return regdump_len;
9233}
9234
/* ethtool_ops::get_regs - fill @_p with a register dump.
 *
 * Layout: struct dump_hdr followed by the raw 32-bit values of every
 * register marked online for this chip revision.  The buffer was sized
 * by bnx2x_get_regs_len() and is pre-zeroed here, so a down interface
 * yields an all-zero dump.
 */
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	/* registers can only be read while the device is up */
	if (!netif_running(bp->dev))
		return;

	/* hdr_size is in dwords, not counting the first dword itself */
	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	/* copy out each online register block, one dword at a time */
	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
9274
Eilon Greenstein0d28e492009-08-12 08:23:40 +00009275#define PHY_FW_VER_LEN 10
9276
9277static void bnx2x_get_drvinfo(struct net_device *dev,
9278 struct ethtool_drvinfo *info)
9279{
9280 struct bnx2x *bp = netdev_priv(dev);
9281 u8 phy_fw_ver[PHY_FW_VER_LEN];
9282
9283 strcpy(info->driver, DRV_MODULE_NAME);
9284 strcpy(info->version, DRV_MODULE_VERSION);
9285
9286 phy_fw_ver[0] = '\0';
9287 if (bp->port.pmf) {
9288 bnx2x_acquire_phy_lock(bp);
9289 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9290 (bp->state != BNX2X_STATE_CLOSED),
9291 phy_fw_ver, PHY_FW_VER_LEN);
9292 bnx2x_release_phy_lock(bp);
9293 }
9294
9295 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9296 (bp->common.bc_ver & 0xff0000) >> 16,
9297 (bp->common.bc_ver & 0xff00) >> 8,
9298 (bp->common.bc_ver & 0xff),
9299 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9300 strcpy(info->bus_info, pci_name(bp->pdev));
9301 info->n_stats = BNX2X_NUM_STATS;
9302 info->testinfo_len = BNX2X_NUM_TESTS;
9303 info->eedump_len = bp->common.flash_size;
9304 info->regdump_len = bnx2x_get_regs_len(dev);
9305}
9306
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009307static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9308{
9309 struct bnx2x *bp = netdev_priv(dev);
9310
9311 if (bp->flags & NO_WOL_FLAG) {
9312 wol->supported = 0;
9313 wol->wolopts = 0;
9314 } else {
9315 wol->supported = WAKE_MAGIC;
9316 if (bp->wol)
9317 wol->wolopts = WAKE_MAGIC;
9318 else
9319 wol->wolopts = 0;
9320 }
9321 memset(&wol->sopass, 0, sizeof(wol->sopass));
9322}
9323
9324static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9325{
9326 struct bnx2x *bp = netdev_priv(dev);
9327
9328 if (wol->wolopts & ~WAKE_MAGIC)
9329 return -EINVAL;
9330
9331 if (wol->wolopts & WAKE_MAGIC) {
9332 if (bp->flags & NO_WOL_FLAG)
9333 return -EINVAL;
9334
9335 bp->wol = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009336 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009337 bp->wol = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009338
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009339 return 0;
9340}
9341
9342static u32 bnx2x_get_msglevel(struct net_device *dev)
9343{
9344 struct bnx2x *bp = netdev_priv(dev);
9345
9346 return bp->msglevel;
9347}
9348
9349static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9350{
9351 struct bnx2x *bp = netdev_priv(dev);
9352
9353 if (capable(CAP_NET_ADMIN))
9354 bp->msglevel = level;
9355}
9356
9357static int bnx2x_nway_reset(struct net_device *dev)
9358{
9359 struct bnx2x *bp = netdev_priv(dev);
9360
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009361 if (!bp->port.pmf)
9362 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009363
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009364 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009365 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009366 bnx2x_link_set(bp);
9367 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009368
9369 return 0;
9370}
9371
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009372static u32 bnx2x_get_link(struct net_device *dev)
Naohiro Ooiwa01e53292009-06-30 12:44:19 -07009373{
9374 struct bnx2x *bp = netdev_priv(dev);
9375
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07009376 if (bp->flags & MF_FUNC_DIS)
9377 return 0;
9378
Naohiro Ooiwa01e53292009-06-30 12:44:19 -07009379 return bp->link_vars.link_up;
9380}
9381
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009382static int bnx2x_get_eeprom_len(struct net_device *dev)
9383{
9384 struct bnx2x *bp = netdev_priv(dev);
9385
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009386 return bp->common.flash_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009387}
9388
/* Request exclusive access to the NVRAM interface for this port.
 *
 * Sets the per-port request bit in the software arbitration register and
 * polls for the matching grant bit.  The poll budget is scaled up 100x on
 * emulation/FPGA where the chip runs slower.
 *
 * Returns 0 on success, -EBUSY if the grant never arrived.
 */
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	/* val holds the last arbitration status read in the loop */
	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
9419
/* Release the NVRAM interface previously taken by
 * bnx2x_acquire_nvram_lock().
 *
 * Sets the per-port clear-request bit and polls until the grant bit
 * drops.  Returns 0 on success, -EBUSY if the grant bit stayed set.
 */
static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
9450
9451static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9452{
9453 u32 val;
9454
9455 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9456
9457 /* enable both bits, even on read */
9458 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9459 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9460 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9461}
9462
9463static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9464{
9465 u32 val;
9466
9467 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9468
9469 /* disable both bits, even after read */
9470 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9471 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9472 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9473}
9474
/* Read one 32-bit word from NVRAM at @offset.
 *
 * Issues a DOIT read command and polls for DONE.  @cmd_flags carries the
 * caller's MCPR_NVM_COMMAND_FIRST/LAST sequencing bits.  On success the
 * word is stored in *@ret_val converted to big-endian, so ethtool sees
 * the flash as a plain byte array regardless of host endianness.
 *
 * Returns 0 on success, -EBUSY on timeout (then *@ret_val is 0).
 */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
9519
/* Read @buf_size bytes of NVRAM starting at @offset into @ret_buf.
 *
 * Both @offset and @buf_size must be dword-aligned and non-zero, and the
 * range must fit inside the flash.  Takes the NVRAM hardware lock for
 * the duration; the first and last dwords are tagged with the
 * FIRST/LAST command flags the flash controller requires.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s); everything but the final dword */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	/* final dword carries the LAST flag (ORed with FIRST if it is
	 * also the only dword) */
	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9574
9575static int bnx2x_get_eeprom(struct net_device *dev,
9576 struct ethtool_eeprom *eeprom, u8 *eebuf)
9577{
9578 struct bnx2x *bp = netdev_priv(dev);
9579 int rc;
9580
Eilon Greenstein2add3ac2009-01-14 06:44:07 +00009581 if (!netif_running(dev))
9582 return -EAGAIN;
9583
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009584 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009585 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9586 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9587 eeprom->len, eeprom->len);
9588
9589 /* parameters already validated in ethtool_get_eeprom */
9590
9591 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9592
9593 return rc;
9594}
9595
/* Write one 32-bit word @val to NVRAM at @offset.
 *
 * Issues a DOIT|WR command and polls for DONE.  @cmd_flags carries the
 * caller's MCPR_NVM_COMMAND_FIRST/LAST sequencing bits.  @val is
 * expected in cpu order (it is written to the register as-is).
 *
 * Returns 0 on success, -EBUSY on timeout.
 */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion; val is reused for the command status here */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
9635
/* bit offset of byte (offset & 3) within a dword */
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

/* Write a single byte to NVRAM (ethtool issues 1-byte writes).
 *
 * Performs a read-modify-write of the containing dword: reads the
 * aligned dword, patches the target byte, and writes it back with
 * FIRST|LAST flags as a standalone transaction.
 *
 * NOTE(review): the mask/insert is applied to the big-endian image
 * returned by bnx2x_nvram_read_dword() using a cpu-order bit offset,
 * then converted back with be32_to_cpu().  This matches the read path's
 * byte-array view on little-endian hosts; behavior on big-endian hosts
 * should be verified.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		/* patch the target byte inside the dword */
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9683
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * A 1-byte request (as ethtool issues) is delegated to
 * bnx2x_nvram_write1(); otherwise @offset and @buf_size must be
 * dword-aligned, non-zero and inside the flash.  Dwords are streamed
 * with FIRST/LAST command flags re-asserted at NVRAM page boundaries,
 * as the flash controller requires.
 *
 * Returns 0 on success or a negative errno.
 */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* LAST on the final dword or at the end of a flash page;
		 * FIRST again at the start of each new page */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9744
/* ethtool_ops::set_eeprom - write NVRAM, with magic-value hooks used by
 * the PHY firmware upgrade utility:
 *   0x50485950 'PHYP' - prepare the PHY for a firmware upgrade
 *   0x50485952 'PHYR' - re-initialize the link after the upgrade
 *   0x53985943        - upgrade completed (commented 'PHYC' below, but
 *                       note the value is NOT ASCII 'PHYC' (0x50485943);
 *                       kept as-is for compatibility with the tool)
 * Any other magic is treated as a plain NVRAM write.  All PHY magics
 * require this function to be the PMF.
 */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
9819
9820static int bnx2x_get_coalesce(struct net_device *dev,
9821 struct ethtool_coalesce *coal)
9822{
9823 struct bnx2x *bp = netdev_priv(dev);
9824
9825 memset(coal, 0, sizeof(struct ethtool_coalesce));
9826
9827 coal->rx_coalesce_usecs = bp->rx_ticks;
9828 coal->tx_coalesce_usecs = bp->tx_ticks;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009829
9830 return 0;
9831}
9832
Eilon Greensteinca003922009-08-12 22:53:28 -07009833#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009834static int bnx2x_set_coalesce(struct net_device *dev,
9835 struct ethtool_coalesce *coal)
9836{
9837 struct bnx2x *bp = netdev_priv(dev);
9838
9839 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -07009840 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9841 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009842
9843 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -07009844 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9845 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009846
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009847 if (netif_running(dev))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009848 bnx2x_update_coalesce(bp);
9849
9850 return 0;
9851}
9852
9853static void bnx2x_get_ringparam(struct net_device *dev,
9854 struct ethtool_ringparam *ering)
9855{
9856 struct bnx2x *bp = netdev_priv(dev);
9857
9858 ering->rx_max_pending = MAX_RX_AVAIL;
9859 ering->rx_mini_max_pending = 0;
9860 ering->rx_jumbo_max_pending = 0;
9861
9862 ering->rx_pending = bp->rx_ring_size;
9863 ering->rx_mini_pending = 0;
9864 ering->rx_jumbo_pending = 0;
9865
9866 ering->tx_max_pending = MAX_TX_AVAIL;
9867 ering->tx_pending = bp->tx_ring_size;
9868}
9869
9870static int bnx2x_set_ringparam(struct net_device *dev,
9871 struct ethtool_ringparam *ering)
9872{
9873 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009874 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009875
9876 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9877 (ering->tx_pending > MAX_TX_AVAIL) ||
9878 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9879 return -EINVAL;
9880
9881 bp->rx_ring_size = ering->rx_pending;
9882 bp->tx_ring_size = ering->tx_pending;
9883
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009884 if (netif_running(dev)) {
9885 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9886 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009887 }
9888
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009889 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009890}
9891
/* ethtool_ops::get_pauseparam - report flow-control configuration.
 * autoneg is reported only when both flow control and line speed are in
 * auto mode; rx/tx pause reflect the currently resolved link state.
 */
static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}
9910
/* ethtool_ops::set_pauseparam - configure flow control.
 *
 * Resolution order matters: the requested rx/tx bits are accumulated on
 * top of AUTO, collapsed to NONE if neither was requested, and finally
 * forced back to AUTO when autoneg is requested and the line speed is
 * also auto.  A multi-function (E1HMF) device silently ignores the
 * request since flow control is not per-function there.
 */
static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	/* nothing requested on top of AUTO means flow control off */
	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
9954
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009955static int bnx2x_set_flags(struct net_device *dev, u32 data)
9956{
9957 struct bnx2x *bp = netdev_priv(dev);
9958 int changed = 0;
9959 int rc = 0;
9960
9961 /* TPA requires Rx CSUM offloading */
9962 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9963 if (!(dev->features & NETIF_F_LRO)) {
9964 dev->features |= NETIF_F_LRO;
9965 bp->flags |= TPA_ENABLE_FLAG;
9966 changed = 1;
9967 }
9968
9969 } else if (dev->features & NETIF_F_LRO) {
9970 dev->features &= ~NETIF_F_LRO;
9971 bp->flags &= ~TPA_ENABLE_FLAG;
9972 changed = 1;
9973 }
9974
9975 if (changed && netif_running(dev)) {
9976 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9977 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9978 }
9979
9980 return rc;
9981}
9982
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009983static u32 bnx2x_get_rx_csum(struct net_device *dev)
9984{
9985 struct bnx2x *bp = netdev_priv(dev);
9986
9987 return bp->rx_csum;
9988}
9989
9990static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9991{
9992 struct bnx2x *bp = netdev_priv(dev);
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009993 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009994
9995 bp->rx_csum = data;
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009996
9997 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9998 TPA'ed packets will be discarded due to wrong TCP CSUM */
9999 if (!data) {
10000 u32 flags = ethtool_op_get_flags(dev);
10001
10002 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10003 }
10004
10005 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010006}
10007
10008static int bnx2x_set_tso(struct net_device *dev, u32 data)
10009{
Eilon Greenstein755735e2008-06-23 20:35:13 -070010010 if (data) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010011 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735e2008-06-23 20:35:13 -070010012 dev->features |= NETIF_F_TSO6;
10013 } else {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010014 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735e2008-06-23 20:35:13 -070010015 dev->features &= ~NETIF_F_TSO6;
10016 }
10017
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010018 return 0;
10019}
10020
/* Self-test names reported through ethtool's test-string API.
 * NOTE(review): the order presumably matches the order the tests are run
 * by the driver's self-test routine — confirm before reordering. */
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
10032
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010033static int bnx2x_test_registers(struct bnx2x *bp)
10034{
10035 int idx, i, rc = -ENODEV;
10036 u32 wr_val = 0;
Yitchak Gertner9dabc422008-08-13 15:51:28 -070010037 int port = BP_PORT(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010038 static const struct {
10039 u32 offset0;
10040 u32 offset1;
10041 u32 mask;
10042 } reg_tbl[] = {
10043/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10044 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
10045 { HC_REG_AGG_INT_0, 4, 0x000003ff },
10046 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10047 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10048 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10049 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10050 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10051 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10052 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10053/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10054 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10055 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10056 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10057 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10058 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10059 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10060 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010061 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
Eilon Greensteinc1f1a062009-07-29 00:20:08 +000010062 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10063/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010064 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10065 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10066 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10067 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10068 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10069 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10070 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10071 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
Eilon Greensteinc1f1a062009-07-29 00:20:08 +000010072 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10073/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010074 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10075 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10076 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10077 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10078 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10079 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10080
10081 { 0xffffffff, 0, 0x00000000 }
10082 };
10083
10084 if (!netif_running(bp->dev))
10085 return rc;
10086
10087 /* Repeat the test twice:
10088 First by writing 0x00000000, second by writing 0xffffffff */
10089 for (idx = 0; idx < 2; idx++) {
10090
10091 switch (idx) {
10092 case 0:
10093 wr_val = 0;
10094 break;
10095 case 1:
10096 wr_val = 0xffffffff;
10097 break;
10098 }
10099
10100 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10101 u32 offset, mask, save_val, val;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010102
10103 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10104 mask = reg_tbl[i].mask;
10105
10106 save_val = REG_RD(bp, offset);
10107
10108 REG_WR(bp, offset, wr_val);
10109 val = REG_RD(bp, offset);
10110
10111 /* Restore the original register's value */
10112 REG_WR(bp, offset, save_val);
10113
10114 /* verify that value is as expected value */
10115 if ((val & mask) != (wr_val & mask))
10116 goto test_reg_exit;
10117 }
10118 }
10119
10120 rc = 0;
10121
10122test_reg_exit:
10123 return rc;
10124}
10125
10126static int bnx2x_test_memory(struct bnx2x *bp)
10127{
10128 int i, j, rc = -ENODEV;
10129 u32 val;
10130 static const struct {
10131 u32 offset;
10132 int size;
10133 } mem_tbl[] = {
10134 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10135 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10136 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10137 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10138 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10139 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10140 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10141
10142 { 0xffffffff, 0 }
10143 };
10144 static const struct {
10145 char *name;
10146 u32 offset;
Yitchak Gertner9dabc422008-08-13 15:51:28 -070010147 u32 e1_mask;
10148 u32 e1h_mask;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010149 } prty_tbl[] = {
Yitchak Gertner9dabc422008-08-13 15:51:28 -070010150 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10151 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10152 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10153 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10154 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10155 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010156
Yitchak Gertner9dabc422008-08-13 15:51:28 -070010157 { NULL, 0xffffffff, 0, 0 }
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010158 };
10159
10160 if (!netif_running(bp->dev))
10161 return rc;
10162
10163 /* Go through all the memories */
10164 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10165 for (j = 0; j < mem_tbl[i].size; j++)
10166 REG_RD(bp, mem_tbl[i].offset + j*4);
10167
10168 /* Check the parity status */
10169 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10170 val = REG_RD(bp, prty_tbl[i].offset);
Yitchak Gertner9dabc422008-08-13 15:51:28 -070010171 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10172 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010173 DP(NETIF_MSG_HW,
10174 "%s is 0x%x\n", prty_tbl[i].name, val);
10175 goto test_mem_exit;
10176 }
10177 }
10178
10179 rc = 0;
10180
10181test_mem_exit:
10182 return rc;
10183}
10184
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010185static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10186{
10187 int cnt = 1000;
10188
10189 if (link_up)
10190 while (bnx2x_link_test(bp) && cnt--)
10191 msleep(10);
10192}
10193
/* Run one loopback iteration: build a single self-addressed test frame,
 * send it through queue 0 in the requested loopback mode (PHY or MAC)
 * and verify it arrives back on queue 0 intact (length and payload).
 * The caller quiesces traffic and holds the PHY lock first (see
 * bnx2x_test_loopback()).  Returns 0 on success, -EINVAL for a
 * bad/inconsistent mode, -ENOMEM if no skb could be allocated, and
 * -ENODEV on any TX/RX mismatch.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	/* the loopback frame is sent and received on queue 0 */
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		/* PHY loopback relies on the diag load having configured
		 * XGXS loopback already; bail out if it did not */
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet: dst MAC = own MAC, src MAC = 0,
	 * remaining header bytes 0x77, payload = running byte counter */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	/* snapshot the status-block consumer indices so we can tell
	 * whether exactly one packet went out and came back */
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	/* fill the start BD with the DMA address and frame attributes */
	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	/* make sure the BDs are in memory before ringing the doorbell */
	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	/* give the chip time to loop the frame back */
	udelay(100);

	/* exactly one TX completion and one RX completion expected */
	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* the CQE must be a fast-path completion without error flags */
	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* verify the payload byte pattern survived the round trip */
	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* consume the RX BD/CQE we inspected and republish producers */
	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
10328
10329static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10330{
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010331 int rc = 0, res;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010332
10333 if (!netif_running(bp->dev))
10334 return BNX2X_LOOPBACK_FAILED;
10335
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070010336 bnx2x_netif_stop(bp, 1);
Eilon Greenstein3910c8a2009-01-22 06:01:32 +000010337 bnx2x_acquire_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010338
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010339 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10340 if (res) {
10341 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10342 rc |= BNX2X_PHY_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010343 }
10344
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010345 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10346 if (res) {
10347 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10348 rc |= BNX2X_MAC_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010349 }
10350
Eilon Greenstein3910c8a2009-01-22 06:01:32 +000010351 bnx2x_release_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010352 bnx2x_netif_start(bp);
10353
10354 return rc;
10355}
10356
10357#define CRC32_RESIDUAL 0xdebb20e3
10358
10359static int bnx2x_test_nvram(struct bnx2x *bp)
10360{
10361 static const struct {
10362 int offset;
10363 int size;
10364 } nvram_tbl[] = {
10365 { 0, 0x14 }, /* bootstrap */
10366 { 0x14, 0xec }, /* dir */
10367 { 0x100, 0x350 }, /* manuf_info */
10368 { 0x450, 0xf0 }, /* feature_info */
10369 { 0x640, 0x64 }, /* upgrade_key_info */
10370 { 0x6a4, 0x64 },
10371 { 0x708, 0x70 }, /* manuf_key_info */
10372 { 0x778, 0x70 },
10373 { 0, 0 }
10374 };
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010375 __be32 buf[0x350 / 4];
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010376 u8 *data = (u8 *)buf;
10377 int i, rc;
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000010378 u32 magic, crc;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010379
10380 rc = bnx2x_nvram_read(bp, 0, data, 4);
10381 if (rc) {
Eilon Greensteinf5372252009-02-12 08:38:30 +000010382 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010383 goto test_nvram_exit;
10384 }
10385
10386 magic = be32_to_cpu(buf[0]);
10387 if (magic != 0x669955aa) {
10388 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10389 rc = -ENODEV;
10390 goto test_nvram_exit;
10391 }
10392
10393 for (i = 0; nvram_tbl[i].size; i++) {
10394
10395 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10396 nvram_tbl[i].size);
10397 if (rc) {
10398 DP(NETIF_MSG_PROBE,
Eilon Greensteinf5372252009-02-12 08:38:30 +000010399 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010400 goto test_nvram_exit;
10401 }
10402
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000010403 crc = ether_crc_le(nvram_tbl[i].size, data);
10404 if (crc != CRC32_RESIDUAL) {
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010405 DP(NETIF_MSG_PROBE,
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000010406 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010407 rc = -ENODEV;
10408 goto test_nvram_exit;
10409 }
10410 }
10411
10412test_nvram_exit:
10413 return rc;
10414}
10415
/* Interrupt self-test: post a benign SET_MAC ramrod (zero-length MAC
 * configuration) on the slowpath and wait for its completion to clear
 * set_mac_pending.  Returns 0 on success, -ENODEV on timeout or when
 * the interface is down.
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* zero entries - the ramrod only needs to complete, not do work */
	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* mark the ramrod as in-flight before posting it; smp_wmb()
	 * presumably pairs with the completion path that clears
	 * set_mac_pending - NOTE(review): clearing code not in view */
	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		/* poll up to ~100ms (10 x 10ms) for the completion */
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
10450
/* ethtool self-test entry point.  buf[] holds one result slot per
 * entry in bnx2x_tests_str_arr (0 = pass).  Offline tests reload the
 * chip in diagnostic mode; they are not supported (and are masked off)
 * in E1H multi-function mode.
 */
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		/* bnx2x_link_test() returns 0 when the link is up */
		link_up = (bnx2x_link_test(bp) == 0);
		/* reload the chip in diagnostic mode for the offline tests */
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		/* buf[0]=registers, buf[1]=memory, buf[2]=loopback */
		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* back to normal operation */
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	/* buf[3]=nvram, buf[4]=interrupt, buf[5]=link */
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* only the port-management function (PMF) checks the link */
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
10520
/* Per-queue statistics exposed through ethtool -S.  Each format string
 * takes the queue index via "[%d]" (substituted in bnx2x_get_strings()).
 * 'size' is the counter width in bytes: 8-byte counters are stored as
 * hi/lo u32 pairs (merged with HILO_U64 in bnx2x_get_ethtool_stats()).
 */
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};
10546
/* Global (per-function/per-port) statistics exposed through ethtool -S.
 * 'flags' classifies each counter as port-wide, per-function or both
 * (see STATS_FLAGS_* below); in E1H multi-function mode port-only
 * counters are filtered out.  'size' is the counter width in bytes:
 * 8-byte counters are stored as hi/lo u32 pairs.
 */
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
10640
/* A stat is port-only when its flags carry PORT but not FUNC. */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
/* In E1H multi-function mode only per-function stats are reported,
 * unless statistics debug messages are enabled in msglevel. */
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
Ben Hutchings15f0a392009-10-01 11:58:24 +000010647static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10648{
10649 struct bnx2x *bp = netdev_priv(dev);
10650 int i, num_stats;
10651
10652 switch(stringset) {
10653 case ETH_SS_STATS:
10654 if (is_multi(bp)) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010655 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
Ben Hutchings15f0a392009-10-01 11:58:24 +000010656 if (!IS_E1HMF_MODE_STAT(bp))
10657 num_stats += BNX2X_NUM_STATS;
10658 } else {
10659 if (IS_E1HMF_MODE_STAT(bp)) {
10660 num_stats = 0;
10661 for (i = 0; i < BNX2X_NUM_STATS; i++)
10662 if (IS_FUNC_STAT(i))
10663 num_stats++;
10664 } else
10665 num_stats = BNX2X_NUM_STATS;
10666 }
10667 return num_stats;
10668
10669 case ETH_SS_TEST:
10670 return BNX2X_NUM_TESTS;
10671
10672 default:
10673 return -EINVAL;
10674 }
10675}
10676
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010677static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10678{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010679 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +000010680 int i, j, k;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010681
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010682 switch (stringset) {
10683 case ETH_SS_STATS:
Eilon Greensteinde832a52009-02-12 08:36:33 +000010684 if (is_multi(bp)) {
10685 k = 0;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010686 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +000010687 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10688 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10689 bnx2x_q_stats_arr[j].string, i);
10690 k += BNX2X_NUM_Q_STATS;
10691 }
10692 if (IS_E1HMF_MODE_STAT(bp))
10693 break;
10694 for (j = 0; j < BNX2X_NUM_STATS; j++)
10695 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10696 bnx2x_stats_arr[j].string);
10697 } else {
10698 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10699 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10700 continue;
10701 strcpy(buf + j*ETH_GSTRING_LEN,
10702 bnx2x_stats_arr[i].string);
10703 j++;
10704 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010705 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010706 break;
10707
10708 case ETH_SS_TEST:
10709 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10710 break;
10711 }
10712}
10713
/* ethtool get_ethtool_stats handler: copy the counters selected by
 * bnx2x_get_sset_count()/bnx2x_get_strings() into 'buf', in the same
 * order as the strings.  size==0 entries are place-holders and read as
 * 0; 4-byte counters are zero-extended; 8-byte counters are hi/lo u32
 * pairs merged with HILO_U64.
 */
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		/* per-queue stats first; k is the base buf[] index of the
		 * current queue's group */
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		/* then the global stats, appended after the queue groups */
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		/* single queue: global stats only, filtered in MF mode;
		 * j tracks the output slot, i the table index */
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
10785
10786static int bnx2x_phys_id(struct net_device *dev, u32 data)
10787{
10788 struct bnx2x *bp = netdev_priv(dev);
10789 int i;
10790
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010791 if (!netif_running(dev))
10792 return 0;
10793
10794 if (!bp->port.pmf)
10795 return 0;
10796
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010797 if (data == 0)
10798 data = 2;
10799
10800 for (i = 0; i < (data * 2); i++) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010801 if ((i % 2) == 0)
Yaniv Rosner7846e472009-11-05 19:18:07 +020010802 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10803 SPEED_1000);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010804 else
Yaniv Rosner7846e472009-11-05 19:18:07 +020010805 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010806
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010807 msleep_interruptible(500);
10808 if (signal_pending(current))
10809 break;
10810 }
10811
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010812 if (bp->link_vars.link_up)
Yaniv Rosner7846e472009-11-05 19:18:07 +020010813 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10814 bp->link_vars.line_speed);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010815
10816 return 0;
10817}
10818
Stephen Hemminger0fc0b732009-09-02 01:03:33 -070010819static const struct ethtool_ops bnx2x_ethtool_ops = {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010820 .get_settings = bnx2x_get_settings,
10821 .set_settings = bnx2x_set_settings,
10822 .get_drvinfo = bnx2x_get_drvinfo,
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000010823 .get_regs_len = bnx2x_get_regs_len,
10824 .get_regs = bnx2x_get_regs,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010825 .get_wol = bnx2x_get_wol,
10826 .set_wol = bnx2x_set_wol,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010827 .get_msglevel = bnx2x_get_msglevel,
10828 .set_msglevel = bnx2x_set_msglevel,
10829 .nway_reset = bnx2x_nway_reset,
Naohiro Ooiwa01e53292009-06-30 12:44:19 -070010830 .get_link = bnx2x_get_link,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010831 .get_eeprom_len = bnx2x_get_eeprom_len,
10832 .get_eeprom = bnx2x_get_eeprom,
10833 .set_eeprom = bnx2x_set_eeprom,
10834 .get_coalesce = bnx2x_get_coalesce,
10835 .set_coalesce = bnx2x_set_coalesce,
10836 .get_ringparam = bnx2x_get_ringparam,
10837 .set_ringparam = bnx2x_set_ringparam,
10838 .get_pauseparam = bnx2x_get_pauseparam,
10839 .set_pauseparam = bnx2x_set_pauseparam,
10840 .get_rx_csum = bnx2x_get_rx_csum,
10841 .set_rx_csum = bnx2x_set_rx_csum,
10842 .get_tx_csum = ethtool_op_get_tx_csum,
Eilon Greenstein755735e2008-06-23 20:35:13 -070010843 .set_tx_csum = ethtool_op_set_tx_hw_csum,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010844 .set_flags = bnx2x_set_flags,
10845 .get_flags = ethtool_op_get_flags,
10846 .get_sg = ethtool_op_get_sg,
10847 .set_sg = ethtool_op_set_sg,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010848 .get_tso = ethtool_op_get_tso,
10849 .set_tso = bnx2x_set_tso,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010850 .self_test = bnx2x_self_test,
Ben Hutchings15f0a392009-10-01 11:58:24 +000010851 .get_sset_count = bnx2x_get_sset_count,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010852 .get_strings = bnx2x_get_strings,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010853 .phys_id = bnx2x_phys_id,
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010854 .get_ethtool_stats = bnx2x_get_ethtool_stats,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010855};
10856
10857/* end of ethtool_ops */
10858
10859/****************************************************************************
10860* General service functions
10861****************************************************************************/
10862
/* Move the device between PCI power states by rewriting the PMCSR
 * register.  Only D0 and D3hot are supported; any other state returns
 * -EINVAL.  For D3hot, PME is armed when WoL is enabled.
 */
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		/* clear the state field (-> D0) and ack any pending PME */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;	/* 3 is the PMCSR state-field encoding of D3hot */

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
10900
Eilon Greenstein237907c2009-01-14 06:42:44 +000010901static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10902{
10903 u16 rx_cons_sb;
10904
10905 /* Tell compiler that status block fields can change */
10906 barrier();
10907 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10908 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10909 rx_cons_sb++;
10910 return (fp->rx_comp_cons != rx_cons_sb);
10911}
10912
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010913/*
10914 * net_device service functions
10915 */
10916
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010917static int bnx2x_poll(struct napi_struct *napi, int budget)
10918{
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010919 int work_done = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010920 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10921 napi);
10922 struct bnx2x *bp = fp->bp;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010923
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010924 while (1) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010925#ifdef BNX2X_STOP_ON_ERROR
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010926 if (unlikely(bp->panic)) {
10927 napi_complete(napi);
10928 return 0;
10929 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010930#endif
10931
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010932 if (bnx2x_has_tx_work(fp))
10933 bnx2x_tx_int(fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010934
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010935 if (bnx2x_has_rx_work(fp)) {
10936 work_done += bnx2x_rx_int(fp, budget - work_done);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010937
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010938 /* must not complete if we consumed full budget */
10939 if (work_done >= budget)
10940 break;
10941 }
Eilon Greenstein356e2382009-02-12 08:38:32 +000010942
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010943 /* Fall out from the NAPI loop if needed */
10944 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10945 bnx2x_update_fpsb_idx(fp);
10946 /* bnx2x_has_rx_work() reads the status block, thus we need
10947 * to ensure that status block indices have been actually read
10948 * (bnx2x_update_fpsb_idx) prior to this check
10949 * (bnx2x_has_rx_work) so that we won't write the "newer"
10950 * value of the status block to IGU (if there was a DMA right
10951 * after bnx2x_has_rx_work and if there is no rmb, the memory
10952 * reading (bnx2x_update_fpsb_idx) may be postponed to right
10953 * before bnx2x_ack_sb). In this case there will never be
10954 * another interrupt until there is another update of the
10955 * status block, while there is still unhandled work.
10956 */
10957 rmb();
10958
10959 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10960 napi_complete(napi);
10961 /* Re-enable interrupts */
10962 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10963 le16_to_cpu(fp->fp_c_idx),
10964 IGU_INT_NOP, 1);
10965 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10966 le16_to_cpu(fp->fp_u_idx),
10967 IGU_INT_ENABLE, 1);
10968 break;
10969 }
10970 }
Eilon Greenstein8534f322009-03-02 07:59:45 +000010971 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010972
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010973 return work_done;
10974}
10975
Eilon Greenstein755735e2008-06-23 20:35:13 -070010976
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 *
 * Shrinks the start BD (*tx_bd) to cover only 'hlen' header bytes,
 * allocates the next ring slot as a data BD covering the remaining
 * bytes of the same DMA mapping, updates *tx_bd to point at the new
 * BD, and returns the advanced bd_prod index.
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	/* data BD starts 'hlen' bytes into the header BD's mapping */
	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
11026
/* Adjust a checksum that was computed starting 'fix' bytes away from
 * the transport header: subtract (fix > 0) or add (fix < 0) the partial
 * sum of the bytes in between, then byte-swap the result for the
 * hardware.  fix == 0 leaves the checksum value untouched (only swabbed).
 */
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
11039
11040static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11041{
11042 u32 rc;
11043
11044 if (skb->ip_summed != CHECKSUM_PARTIAL)
11045 rc = XMIT_PLAIN;
11046
11047 else {
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000011048 if (skb->protocol == htons(ETH_P_IPV6)) {
Eilon Greenstein755735e2008-06-23 20:35:13 -070011049 rc = XMIT_CSUM_V6;
11050 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11051 rc |= XMIT_CSUM_TCP;
11052
11053 } else {
11054 rc = XMIT_CSUM_V4;
11055 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11056 rc |= XMIT_CSUM_TCP;
11057 }
11058 }
11059
11060 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
Eilon Greensteind6a2f982009-11-09 06:09:22 +000011061 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011062
11063 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
Eilon Greensteind6a2f982009-11-09 06:09:22 +000011064 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011065
11066 return rc;
11067}
11068
Eilon Greenstein632da4d2009-01-14 06:44:10 +000011069#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
Eilon Greensteinf5372252009-02-12 08:38:30 +000011070/* check if packet requires linearization (packet is too fragmented)
11071 no need to check fragmentation if page size > 8K (there will be no
11072 violation to FW restrictions) */
Eilon Greenstein755735e2008-06-23 20:35:13 -070011073static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11074 u32 xmit_type)
11075{
11076 int to_copy = 0;
11077 int hlen = 0;
11078 int first_bd_sz = 0;
11079
11080 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11081 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11082
11083 if (xmit_type & XMIT_GSO) {
11084 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11085 /* Check if LSO packet needs to be copied:
11086 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11087 int wnd_size = MAX_FETCH_BD - 3;
Eilon Greenstein33471622008-08-13 15:59:08 -070011088 /* Number of windows to check */
Eilon Greenstein755735e2008-06-23 20:35:13 -070011089 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11090 int wnd_idx = 0;
11091 int frag_idx = 0;
11092 u32 wnd_sum = 0;
11093
11094 /* Headers length */
11095 hlen = (int)(skb_transport_header(skb) - skb->data) +
11096 tcp_hdrlen(skb);
11097
11098 /* Amount of data (w/o headers) on linear part of SKB*/
11099 first_bd_sz = skb_headlen(skb) - hlen;
11100
11101 wnd_sum = first_bd_sz;
11102
11103 /* Calculate the first sum - it's special */
11104 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11105 wnd_sum +=
11106 skb_shinfo(skb)->frags[frag_idx].size;
11107
11108 /* If there was data on linear skb data - check it */
11109 if (first_bd_sz > 0) {
11110 if (unlikely(wnd_sum < lso_mss)) {
11111 to_copy = 1;
11112 goto exit_lbl;
11113 }
11114
11115 wnd_sum -= first_bd_sz;
11116 }
11117
11118 /* Others are easier: run through the frag list and
11119 check all windows */
11120 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11121 wnd_sum +=
11122 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11123
11124 if (unlikely(wnd_sum < lso_mss)) {
11125 to_copy = 1;
11126 break;
11127 }
11128 wnd_sum -=
11129 skb_shinfo(skb)->frags[wnd_idx].size;
11130 }
Eilon Greenstein755735e2008-06-23 20:35:13 -070011131 } else {
11132 /* in non-LSO too fragmented packet should always
11133 be linearized */
11134 to_copy = 1;
11135 }
11136 }
11137
11138exit_lbl:
11139 if (unlikely(to_copy))
11140 DP(NETIF_MSG_TX_QUEUED,
11141 "Linearization IS REQUIRED for %s packet. "
11142 "num_frags %d hlen %d first_bd_sz %d\n",
11143 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11144 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11145
11146 return to_copy;
11147}
Eilon Greenstein632da4d2009-01-14 06:44:10 +000011148#endif
Eilon Greenstein755735e2008-06-23 20:35:13 -070011149
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 *
 * Main transmit entry point.  Builds the BD chain for one skb:
 * start BD -> parse BD (checksum/TSO info) -> optional split data BD
 * -> one data BD per frag, then rings the doorbell.  Returns
 * NETDEV_TX_OK (including on silent drop after failed linearization)
 * or NETDEV_TX_BUSY when the ring is unexpectedly full.
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	/* queue mapping selects the fastpath (one Tx queue per fastpath) */
	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	/* the stack should have stopped the queue before it got this full */
	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		/* header lengths in the parse BD are in 16-bit words */
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		/* back to bytes, for the TSO split check below */
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	/* NOTE(review): mapping errors are not checked here (or for the
	 * frag mappings below) - verify this is acceptable on platforms
	 * where pci_map_single() can fail (e.g. with an IOMMU) */
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		/* split the first BD when linear data extends past the
		 * headers (see bnx2x_tx_split) */
		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		/* the first frag BD also carries the total packet size */
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
11427
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011428/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011429static int bnx2x_open(struct net_device *dev)
11430{
11431 struct bnx2x *bp = netdev_priv(dev);
11432
Eilon Greenstein6eccabb2009-01-22 03:37:48 +000011433 netif_carrier_off(dev);
11434
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011435 bnx2x_set_power_state(bp, PCI_D0);
11436
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011437 return bnx2x_nic_load(bp, LOAD_OPEN);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011438}
11439
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011440/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011441static int bnx2x_close(struct net_device *dev)
11442{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011443 struct bnx2x *bp = netdev_priv(dev);
11444
11445 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011446 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11447 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11448 if (!CHIP_REV_IS_SLOW(bp))
11449 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011450
11451 return 0;
11452}
11453
/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* RX filtering can only be programmed on a fully-started device */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	/* E1 has a limited number of exact-match CAM entries; fall back to
	 * all-multi when the list does not fit */
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			/* E1: program each multicast MAC into a CAM entry
			 * via a SET_MAC ramrod */
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				/* CAM stores the MAC as three
				 * byte-swapped 16-bit words */
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			/* Invalidate any leftover entries from a previous,
			 * longer multicast list */
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			/* Mark a MAC config as pending before posting the
			 * ramrod; the barrier orders the flag vs. the post */
			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			/* E1H uses a hash filter: top CRC byte selects one
			 * of 256 bits spread across MC_HASH_SIZE registers */
			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	/* Commit the computed mode to the storm firmware */
	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
11577
11578/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011579static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11580{
11581 struct sockaddr *addr = p;
11582 struct bnx2x *bp = netdev_priv(dev);
11583
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011584 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011585 return -EINVAL;
11586
11587 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011588 if (netif_running(dev)) {
11589 if (CHIP_IS_E1(bp))
Michael Chane665bfd2009-10-10 13:46:54 +000011590 bnx2x_set_eth_mac_addr_e1(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011591 else
Michael Chane665bfd2009-10-10 13:46:54 +000011592 bnx2x_set_eth_mac_addr_e1h(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011593 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011594
11595 return 0;
11596}
11597
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011598/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011599static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11600 int devad, u16 addr)
11601{
11602 struct bnx2x *bp = netdev_priv(netdev);
11603 u16 value;
11604 int rc;
11605 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11606
11607 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11608 prtad, devad, addr);
11609
11610 if (prtad != bp->mdio.prtad) {
11611 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11612 prtad, bp->mdio.prtad);
11613 return -EINVAL;
11614 }
11615
11616 /* The HW expects different devad if CL22 is used */
11617 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11618
11619 bnx2x_acquire_phy_lock(bp);
11620 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11621 devad, addr, &value);
11622 bnx2x_release_phy_lock(bp);
11623 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11624
11625 if (!rc)
11626 rc = value;
11627 return rc;
11628}
11629
11630/* called with rtnl_lock */
11631static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11632 u16 addr, u16 value)
11633{
11634 struct bnx2x *bp = netdev_priv(netdev);
11635 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11636 int rc;
11637
11638 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11639 " value 0x%x\n", prtad, devad, addr, value);
11640
11641 if (prtad != bp->mdio.prtad) {
11642 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11643 prtad, bp->mdio.prtad);
11644 return -EINVAL;
11645 }
11646
11647 /* The HW expects different devad if CL22 is used */
11648 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11649
11650 bnx2x_acquire_phy_lock(bp);
11651 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11652 devad, addr, value);
11653 bnx2x_release_phy_lock(bp);
11654 return rc;
11655}
11656
11657/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011658static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11659{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011660 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011661 struct mii_ioctl_data *mdio = if_mii(ifr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011662
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011663 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11664 mdio->phy_id, mdio->reg_num, mdio->val_in);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011665
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011666 if (!netif_running(dev))
11667 return -EAGAIN;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011668
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011669 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011670}
11671
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011672/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011673static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11674{
11675 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011676 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011677
11678 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11679 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11680 return -EINVAL;
11681
11682 /* This does not race with packet allocation
Eliezer Tamirc14423f2008-02-28 11:49:42 -080011683 * because the actual alloc size is
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011684 * only updated as part of load
11685 */
11686 dev->mtu = new_mtu;
11687
11688 if (netif_running(dev)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011689 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11690 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011691 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011692
11693 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011694}
11695
/* TX watchdog callback, invoked when dev->watchdog_timeo expires.
 * Defers actual recovery to the reset task so it runs in process
 * context rather than here.
 */
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	/* Debug builds: freeze the device state for inspection instead
	 * of recovering */
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
11707
#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 vlan_flags = 0;

	bp->vlgrp = vlgrp;

	/* Recompute the VLAN offload flags from the current features */
	if (dev->features & NETIF_F_HW_VLAN_TX)
		vlan_flags |= HW_VLAN_TX_FLAG;
	if (dev->features & NETIF_F_HW_VLAN_RX)
		vlan_flags |= HW_VLAN_RX_FLAG;

	bp->flags = (bp->flags & ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG)) |
		    vlan_flags;

	/* Push the new configuration to the chip if it is up */
	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif
11731
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll callback (netconsole etc.): run the interrupt handler with
 * the device IRQ masked so it cannot race with the real HW interrupt.
 */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
11742
/* net_device operations table wired into each bnx2x netdev by
 * bnx2x_init_dev(); VLAN and netpoll entries are compile-time optional.
 */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
11760
/* Low-level PCI/device bring-up called once per function from
 * bnx2x_init_one(): enables the PCI device, validates and maps BAR0
 * (registers) and BAR2 (doorbells), checks PM/PCIe capabilities, sets
 * the DMA mask, and initializes net_device callbacks and feature flags.
 * On failure everything acquired so far is released via the goto chain.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	/* PCI function number selects the port/function on the chip */
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 (registers) and BAR2 (doorbells) must both be MMIO */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* Regions are shared between the functions of the device;
	 * request them only for the first function to enable it */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* Prefer 64-bit DMA (sets USING_DAC_FLAG), fall back to 32-bit,
	 * otherwise fail the probe */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Map at most BNX2X_DB_SIZE of the doorbell BAR */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	/* Hook up net_device callbacks and offload feature flags */
	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	/* Same offloads are available on VLAN sub-interfaces */
	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
11924
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011925static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11926 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -080011927{
11928 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11929
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011930 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11931
11932 /* return value of 1=2.5GHz 2=5GHz */
11933 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -080011934}
11935
/* Validate the firmware image loaded into bp->firmware: every section's
 * offset/length must lie within the blob, every init-ops offset must be
 * in range, and the embedded FW version must match what this driver was
 * built against.  Returns 0 if the image is usable, -EINVAL otherwise.
 */
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	/* The header is itself an array of {offset, len} sections */
	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of "
					    "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of "
					    "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
				    " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
11996
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000011997static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011998{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000011999 const __be32 *source = (const __be32 *)_source;
12000 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012001 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012002
12003 for (i = 0; i < n/4; i++)
12004 target[i] = be32_to_cpu(source[i]);
12005}
12006
12007/*
12008 Ops array is stored in the following format:
12009 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12010 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012011static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012012{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012013 const __be32 *source = (const __be32 *)_source;
12014 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012015 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012016
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012017 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012018 tmp = be32_to_cpu(source[j]);
12019 target[i].op = (tmp >> 24) & 0xff;
12020 target[i].offset = tmp & 0xffffff;
12021 target[i].raw_data = be32_to_cpu(source[j+1]);
12022 }
12023}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012024
12025static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012026{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012027 const __be16 *source = (const __be16 *)_source;
12028 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012029 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012030
12031 for (i = 0; i < n/2; i++)
12032 target[i] = be16_to_cpu(source[i]);
12033}
12034
/* Allocate bp->arr and fill it from firmware section 'arr', using
 * 'func' to convert the raw big-endian section data to host order.
 * Jumps to 'lbl' on allocation failure.
 * NOTE: relies on 'bp' and 'fw_hdr' being in scope at the call site
 * (see bnx2x_init_firmware()).
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes " \
			       "for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)
12047
/* Request the chip-appropriate firmware file, validate it, and set up
 * all init-array pointers: the converted init_data/init_ops/
 * init_ops_offsets allocations plus direct pointers into the firmware
 * blob for each STORM's interrupt table and program RAM.
 * On error the goto chain frees whatever was already allocated.
 */
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	/* E1 and E1H chips take different firmware images */
	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else
		fw_file_name = FW_FILE_NAME_E1H;

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	/* Reject truncated/corrupt/mismatched images up front */
	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
12116
12117
/* PCI probe entry point: allocates the multi-queue netdev, performs the
 * low-level device setup, initializes driver state, loads firmware and
 * registers the net device.  Returns 0 on success or a negative errno;
 * on error everything acquired so far is torn down.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		/* init_dev cleaned up after itself; only the netdev
		 * allocation remains ours to free */
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	/* Announce the board, its PCIe link and MAC address */
	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
12188
/* PCI remove entry point: unregisters the netdev, frees the firmware
 * init arrays, unmaps the BARs and releases the PCI device — the exact
 * reverse of bnx2x_init_one()/bnx2x_init_dev().
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Free the init arrays allocated by bnx2x_init_firmware() and
	 * drop our reference on the firmware blob */
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* Regions are shared among functions; release on last disable */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
12221
/* PCI power-management suspend callback.
 * Saves PCI config space, and if the interface is up, detaches it,
 * unloads the NIC (UNLOAD_CLOSE) and drops to the requested power state.
 * All of this runs under rtnl_lock to serialize with open/close.
 */
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	/* config space is saved even when the interface is down */
	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
12252
/* PCI power-management resume callback.
 * Restores PCI config space, and if the interface was up, returns to
 * D0, re-attaches the netdev and reloads the NIC (LOAD_OPEN).
 * Runs under rtnl_lock to serialize with open/close.
 */
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
12283
/* Minimal NIC unload used from the PCI error-recovery (EEH) path.
 * Unlike the normal unload, no ramrods are sent to the (possibly dead)
 * device: we just stop the datapath, release IRQs, invalidate the E1
 * CAM shadow and free driver memory.  Returns 0 unconditionally.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		/* E1 keeps the MAC CAM configuration in a slow-path buffer;
		   mark every entry invalid so it is not replayed */
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
12323
/* Re-discover management-CPU (MCP) state after a PCI bus reset.
 * Re-reads the shared-memory base from the device, validates it and,
 * if the MCP is alive, re-synchronizes the driver/firmware mailbox
 * sequence number.  Sets NO_MCP_FLAG when the MCP looks inactive.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* shmem must live in the 0xA0000-0xBFFFF window; anything else
	   (including 0) means the MCP is not running */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
12353
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  It detaches the interface,
 * performs a no-hardware-access unload if the device was running,
 * and asks the PCI core for a slot reset (or disconnects on
 * permanent failure).
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	/* the link is gone for good - no point requesting a reset */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
12387
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * Re-enables the device, restores bus mastering and the config
 * space saved before the reset; actual traffic restart happens
 * later in bnx2x_io_resume().
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
12418
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.  Re-probes MCP state, reloads the
 * NIC if the interface was running, and re-attaches the netdev.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	/* shared memory may have moved across the reset - re-read it */
	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
12442
/* PCI error-recovery (AER/EEH) callbacks wired into bnx2x_pci_driver */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
12448
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012449static struct pci_driver bnx2x_pci_driver = {
Wendy Xiong493adb12008-06-23 20:36:22 -070012450 .name = DRV_MODULE_NAME,
12451 .id_table = bnx2x_pci_tbl,
12452 .probe = bnx2x_init_one,
12453 .remove = __devexit_p(bnx2x_remove_one),
12454 .suspend = bnx2x_suspend,
12455 .resume = bnx2x_resume,
12456 .err_handler = &bnx2x_err_handler,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012457};
12458
/* Module init: create the slow-path workqueue before registering the
 * PCI driver, since probe may queue work on it.  Destroys the
 * workqueue again if driver registration fails.
 */
static int __init bnx2x_init(void)
{
	int ret;

	printk(KERN_INFO "%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}
12478
/* Module exit: unregister the driver (detaching all devices) before
 * destroying the workqueue those devices may have used.
 */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
12485
/* module entry/exit points */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
12488
Michael Chan993ac7b2009-10-10 13:46:56 +000012489#ifdef BCM_CNIC
12490
/* count denotes the number of new completions we have seen */
/* Credit back 'count' slow-path completions and drain as many queued
 * cnic KWQEs as the SPQ credit (max_kwqe_pending) allows, copying them
 * from the kwq ring onto the hardware slow-path queue.  All ring
 * accounting is done under bp->spq_lock.
 */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		/* copy the queued KWQE into the next SPQ element */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* advance the consumer with ring wrap-around */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
12526
/* cnic entry point for submitting 16-byte KWQEs.
 * Copies up to 'count' KWQEs into the driver-side kwq ring (stopping
 * early when the ring holds MAX_SP_DESC_CNT entries), then kicks
 * bnx2x_cnic_sp_post() if SPQ credit is available.  Returns the number
 * of KWQEs actually accepted.
 * NOTE(review): 'i' is a signed int compared against the u32 'count';
 * harmless for realistic counts but worth confirming upstream intent.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* ring full - accept only what fits */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		/* advance the producer with ring wrap-around */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
12569
12570static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12571{
12572 struct cnic_ops *c_ops;
12573 int rc = 0;
12574
12575 mutex_lock(&bp->cnic_mutex);
12576 c_ops = bp->cnic_ops;
12577 if (c_ops)
12578 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12579 mutex_unlock(&bp->cnic_mutex);
12580
12581 return rc;
12582}
12583
12584static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12585{
12586 struct cnic_ops *c_ops;
12587 int rc = 0;
12588
12589 rcu_read_lock();
12590 c_ops = rcu_dereference(bp->cnic_ops);
12591 if (c_ops)
12592 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12593 rcu_read_unlock();
12594
12595 return rc;
12596}
12597
12598/*
12599 * for commands that have no data
12600 */
12601static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12602{
12603 struct cnic_ctl_info ctl = {0};
12604
12605 ctl.cmd = cmd;
12606
12607 return bnx2x_cnic_ctl_send(bp, &ctl);
12608}
12609
12610static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12611{
12612 struct cnic_ctl_info ctl;
12613
12614 /* first we tell CNIC and only then we count this as a completion */
12615 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12616 ctl.data.comp.cid = cid;
12617
12618 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12619 bnx2x_cnic_sp_post(bp, 1);
12620}
12621
/* Control entry point exposed to the cnic driver (cp->drv_ctl).
 * Dispatches on ctl->cmd: ILT context-table writes, slow-path
 * completion credits, and enabling/disabling L2 rx for a cnic client.
 * Returns 0 on success, -EINVAL on an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		/* write one context-table (ILT) entry */
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		/* credit back completed slow-path entries */
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
12668
/* Publish interrupt information to the cnic driver: irq_arr[0] is the
 * dedicated cnic status block (with MSI-X vector 1 when MSI-X is in
 * use), irq_arr[1] is the default status block.
 */
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		/* msix_table[1] is reserved for cnic */
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
12688
/* cnic registration callback (cp->drv_register_cnic).
 * Allocates the KWQE staging ring, initializes the cnic status block
 * and IRQ info, programs the iSCSI MAC, and finally publishes 'ops'
 * via rcu_assign_pointer so the send paths start forwarding.
 * Called under rtnl_lock (assumed from the cnic contract - verify).
 * Returns 0, or -EINVAL/-EBUSY/-ENOMEM on failure.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* interrupts still disabled - device not ready */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* ring starts empty: cons == prod, last marks the wrap point */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* publish ops last so readers see fully-initialized state */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
12726
/* cnic unregistration callback (cp->drv_unregister_cnic).
 * Clears the iSCSI MAC if it was set, unpublishes cnic_ops under
 * cnic_mutex, then waits for RCU readers (the _bh send path) to
 * drain via synchronize_rcu() before freeing the KWQE ring.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* make sure no in-flight RCU reader still uses cnic_ops/kwq */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
12746
/* Exported probe hook used by the cnic module to discover this device.
 * Fills the per-device cnic_eth_dev descriptor with chip/BAR info,
 * context-table geometry and the driver callbacks, and returns it.
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	/* BAR0 (registers) and BAR2 (doorbells) mappings */
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
12770
12771#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012772