blob: d7aef840a1d51904d378f871f52f7b3ff0b33bd3 [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Eilon Greensteind05c26c2009-01-17 23:26:13 -08003 * Copyright (c) 2007-2009 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080041#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070045#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020046#include <linux/workqueue.h>
47#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070048#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020049#include <linux/prefetch.h>
50#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020051#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000052#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020053
Eilon Greenstein359d8b12009-02-12 08:38:25 +000054
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020055#include "bnx2x.h"
56#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070057#include "bnx2x_init_ops.h"
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000058#include "bnx2x_dump.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020059
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000060#define DRV_MODULE_VERSION "1.52.1-5"
Eilon Greenstein0ab365f2009-11-09 06:09:37 +000061#define DRV_MODULE_RELDATE "2009/11/09"
Eilon Greenstein34f80b02008-06-23 20:33:01 -070062#define BNX2X_BC_VER 0x040200
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020063
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070064#include <linux/firmware.h>
65#include "bnx2x_fw_file_hdr.h"
66/* FW files */
Ben Hutchings45229b42009-11-07 11:53:39 +000067#define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070074
Eilon Greenstein34f80b02008-06-23 20:33:01 -070075/* Time in jiffies before concluding the transmitter is hung */
76#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020077
/* Banner printed once at probe time. */
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Request the matching firmware images at module load (udev firmware loader). */
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

/* Multi-queue RSS mode; enabled by default. */
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

/* Number of RX/TX queues when multi_mode=1; 0 means one per CPU. */
static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is as a number of CPUs)");

/* Non-zero disables TPA (LRO-style aggregation) on the RX path. */
static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

/* Force a specific interrupt mode instead of auto-selecting MSI-X. */
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

/* Non-zero enables pause-on-exhausted-host-ring (dropless) flow control. */
static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

/* Debug aid: service the device from a timer instead of interrupts. */
static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

/* Debug aid: force the PCIe Max Read Request Size (-1 = leave as is). */
static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

/* Initial netif_msg level for new devices. */
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

/* Driver-private workqueue for slow-path work items. */
static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200126
/* Supported chip variants; values index board_info[] below. */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};
132
/* indexed by board_type, above */
static struct {
	char *name;		/* human-readable board name for probe log */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
141
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700142
/* PCI IDs handled by this driver; driver_data is the bnx2x_board_type. */
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
151
152/****************************************************************************
153* General service functions
154****************************************************************************/
155
/* used only at init
 * locking is done by mcp
 *
 * Indirect register write: program the GRC address window through PCI
 * config space, write the value, then park the window back on the
 * vendor-ID offset so ordinary config reads keep working.
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	/* restore the address window to a harmless offset */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
166
/* Indirect register read: mirror of bnx2x_reg_wr_ind() — select the GRC
 * address via PCI config space, read the data register, then restore the
 * address window.  Same init-only/MCP-locked usage as the write side.
 */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	/* restore the address window to a harmless offset */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200178
/* "GO" doorbell registers for the 16 DMAE command cells, indexed by cell. */
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
185
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	/* command cells are laid out back-to-back in DMAE command memory */
	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	/* copy the command one dword at a time */
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	/* ring the cell's doorbell to start execution */
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
202
/* DMA-copy len32 dwords from host memory (dma_addr) to GRC space
 * (dst_addr) using a DMAE command, then busy-poll the write-back
 * completion word until the engine signals DMAE_COMP_VAL or ~200
 * iterations elapse.  Falls back to indirect register writes while
 * the DMAE engine is not yet initialized.  Serialized by dmae_mutex.
 */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;	/* poll budget for the completion loop below */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* PCI -> GRC copy; completion is written back to host (PCI) memory */
	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;	/* GRC address in dwords */
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* only one DMAE slow-path transaction may be in flight at a time */
	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* wait for the engine to write DMAE_COMP_VAL back to wb_comp */
	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
276
/* DMA-copy len32 dwords from GRC space (src_addr) into the slow-path
 * write-back buffer (bp->slowpath->wb_data) using a DMAE command, and
 * busy-poll the completion word as in bnx2x_write_dmae().  Falls back
 * to per-dword indirect reads while the DMAE engine is not ready.
 * Serialized by dmae_mutex; the result stays in the wb_data buffer.
 */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;	/* poll budget for the completion loop below */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* GRC -> PCI copy; completion is written back to host (PCI) memory */
	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;	/* GRC address in dwords */
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	/* only one DMAE slow-path transaction may be in flight at a time */
	mutex_lock(&bp->dmae_mutex);

	/* clear the destination buffer and the completion word */
	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* wait for the engine to write DMAE_COMP_VAL back to wb_comp */
	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200351
Eilon Greenstein573f2032009-08-12 08:24:14 +0000352void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
353 u32 addr, u32 len)
354{
355 int offset = 0;
356
357 while (len > DMAE_LEN32_WR_MAX) {
358 bnx2x_write_dmae(bp, phys_addr + offset,
359 addr + offset, DMAE_LEN32_WR_MAX);
360 offset += DMAE_LEN32_WR_MAX * 4;
361 len -= DMAE_LEN32_WR_MAX;
362 }
363
364 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
365}
366
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700367/* used only for slowpath so not inlined */
368static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
369{
370 u32 wb_write[2];
371
372 wb_write[0] = val_hi;
373 wb_write[1] = val_lo;
374 REG_WR_DMAE(bp, reg, wb_write, 2);
375}
376
#ifdef USE_WB_RD
/* 64-bit wide-bus read via DMAE; currently compiled out. */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 data[2];

	REG_RD_DMAE(bp, reg, data, 2);

	/* data[0] holds the high dword, data[1] the low dword */
	return HILO_U64(data[0], data[1]);
}
#endif
387
/* Dump the firmware assert lists of all four storm processors (X/T/C/U).
 * Each storm keeps an array of assert entries (4 dwords each) in its
 * internal memory; entries are valid until the first one whose row0 is
 * COMMON_ASM_INVALID_ASSERT_OPCODE.  Returns the total number of asserts
 * found across all storms.
 *
 * NOTE(review): the four sections below are intentionally parallel; the
 * per-storm BAR and offset macros differ, so they are kept open-coded.
 */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;	/* first invalid entry ends the list */
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;	/* first invalid entry ends the list */
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;	/* first invalid entry ends the list */
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;	/* first invalid entry ends the list */
		}
	}

	return rc;
}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800508
/* Print the management firmware's text log from the MCP scratchpad.
 * The dword at scratch offset 0xf104 is a marker into the circular log
 * buffer; the log is printed in two passes (marker..0xF900, then
 * 0xF108..marker) so the output comes out in chronological order.
 * Words are byte-swapped with htonl() so the scratchpad bytes print as
 * readable ASCII; data[8] = 0 NUL-terminates each 32-byte chunk.
 */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);	/* round up to a dword boundary */
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	/* older half of the circular buffer: marker to end of log area */
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	/* newer half: start of log area up to the marker */
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}
536
/* Crash-time diagnostic dump: disables statistics, then prints the
 * slow-path indices, per-queue RX/TX indices, a window of each RX/TX
 * ring around the current consumer, and finally the firmware log and
 * storm assert lists.  Called from the panic/error paths only.
 */
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	/* stop the statistics state machine before touching the rings */
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* RX BD ring: 10 entries behind to ~503 ahead of consumer */
		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		/* SGE ring between producer and last seen max */
		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		/* RX completion queue around the consumer */
		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* TX packet bookkeeping around the consumer */
		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		/* raw TX BDs around the BD consumer */
		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
648
/* Enable host-coalescing interrupts for this port according to the
 * active interrupt mode (MSI-X, MSI or INTx).  In the INTx case the HC
 * config is written twice: first with the MSI/MSI-X enable bit set (a
 * HW requirement before falling back to the line interrupt), then with
 * it cleared.  On E1H, also programs the leading/trailing edge attention
 * masks.  Ordering between the writes is enforced with mmiowb()/barrier().
 */
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		/* INTx: intermediate write with MSI/MSI-X bit still set */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			/* attention bits for this VN only */
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
708
/* Mask all HC interrupt sources for this port and read the register
 * back to verify the write actually reached the chip.
 */
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* clear every interrupt-enable bit in one shot */
	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
730
/* Disable interrupt handling at the driver level (bump intr_sem so ISRs
 * bail out), optionally mask them in the HW as well, then wait for all
 * in-flight ISRs and the slow-path work item to finish.
 */
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is the default/slow-path vector */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		/* an extra vector is reserved for CNIC */
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
760
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700761/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200762
763/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700764 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200765 */
766
/* ACK a status block to the IGU: pack sb_id/storm/update/op together
 * with the processed index into an igu_ack_register and write it to the
 * HC interrupt-ACK command register of this port.
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
789
/* Refresh the driver's cached CSTORM/USTORM status block indices from
 * the fastpath status block that the chip updates via DMA.
 */
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}
798
/* Read the interrupt status from the HC SIMD mask register of this
 * port; per the function name this read acknowledges the interrupt.
 * Note the 32-bit register value is truncated to u16 on return.
 */
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
810
811
812/*
813 * fast path service functions
814 */
815
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -0800816static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
817{
818 /* Tell compiler that consumer and producer can change */
819 barrier();
820 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
Eilon Greenstein237907c2009-01-14 06:42:44 +0000821}
822
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 *
 * Walks the chain of buffer descriptors belonging to one transmitted
 * packet: unmaps the start BD, skips the parse BD (and the TSO split
 * header BD when present - neither carries a DMA mapping), unmaps each
 * fragment BD, and finally releases the skb.
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	/* nbd counts the BDs that remain after the start BD */
	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	/* new consumer: first BD of this packet plus its BD count */
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
889
/* Return the number of Tx BDs currently available on this fastpath
 * ring.  Uses signed 16-bit subtraction (SUB_S16) so that producer/
 * consumer wrap-around is handled, and reserves the NUM_TX_RINGS
 * "next-page" entries which can never hold real descriptors.
 */
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
912
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000913static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
914{
915 u16 hw_cons;
916
917 /* Tell compiler that status block fields can change */
918 barrier();
919 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
920 return hw_cons != fp->tx_pkt_cons;
921}
922
/* Reclaim completed Tx packets on one fastpath queue: walk from the
 * driver's packet consumer up to the HW consumer reported in the status
 * block, freeing each packet's BDs, then wake the netdev Tx queue if it
 * was stopped and enough ring space has been freed.  Returns 0, or -1
 * when the driver has panicked (BNX2X_STOP_ON_ERROR builds).
 */
static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		/* re-check under the barrier before waking the queue */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
	return 0;
}
978
Michael Chan993ac7b2009-10-10 13:46:56 +0000979#ifdef BCM_CNIC
980static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
981#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -0700982
/* Handle a slow-path (ramrod) completion CQE received on a fastpath
 * ring.  For non-leading queues (fp->index != 0) only per-queue
 * setup/halt transitions are expected; for the leading queue the
 * global bp->state machine is advanced according to the completed
 * command.  Each path ends with mb() so bnx2x_wait_ramrod() observes
 * the state change.
 */
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* a slow-path queue entry has been consumed */
	bp->spq_left++;

	if (fp->index) {
		/* per-queue state machine (non-leading queues) */
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	/* leading queue: drive the global state machine */
	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
1065
/* Release one Rx SGE entry: unmap its DMA mapping, free the page(s)
 * and clear both the software ring slot and the HW SGE descriptor.
 * "Next page" ring elements carry no page and are skipped.
 */
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}
1085
/* Free the Rx SGE entries [0, last) of this fastpath ring. */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int idx = 0;

	while (idx < last)
		bnx2x_free_rx_sge(bp, fp, idx++);
}
1094
/* Allocate and DMA-map a fresh page block for one Rx SGE slot, then
 * publish its address in the HW SGE descriptor.  Returns 0 on success
 * or -ENOMEM if either the page allocation or the mapping fails (in
 * which case nothing is left allocated).
 */
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* mapping failed - give the page back */
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
1121
/* Allocate and DMA-map a fresh skb for one Rx BD slot and publish its
 * address in the HW descriptor.  Returns 0 on success or -ENOMEM if
 * allocation or mapping fails (the skb is freed on mapping failure).
 */
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
1149
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	/* hand the first RX_COPY_THRESH bytes back to the device
	 * before the buffer is reused at the producer slot */
	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	/* move skb, mapping and HW descriptor from cons to prod */
	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
1173
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001174static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1175 u16 idx)
1176{
1177 u16 last_max = fp->last_max_sge;
1178
1179 if (SUB_S16(idx, last_max) > 0)
1180 fp->last_max_sge = idx;
1181}
1182
1183static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1184{
1185 int i, j;
1186
1187 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1188 int idx = RX_SGE_CNT * i - 1;
1189
1190 for (j = 0; j < 2; j++) {
1191 SGE_MASK_CLEAR_BIT(fp, idx);
1192 idx--;
1193 }
1194 }
1195}
1196
/* Advance the Rx SGE producer after a TPA completion: clear the mask
 * bits of the pages the FW consumed (listed in the CQE's SGL), then
 * push rx_sge_prod forward over every fully-consumed mask element,
 * re-arming those elements for reuse.
 */
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	/* number of SGE pages used by the part of the packet that did
	 * not fit into the BD (pkt_len - len_on_bd), rounded up */
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		/* stop at the first element that still has set bits -
		 * it is not fully consumed yet */
		if (likely(fp->sge_mask[i]))
			break;

		/* element fully consumed: re-arm it and advance prod */
		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
1249
/* Initialize the SGE availability bit-mask: every real entry marked
 * available (all 1-s), "next page" entries cleared.
 */
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
1262
/* Start a TPA (LRO) aggregation on the given bin: park the partially
 * received skb from the consumer slot in the TPA pool (still mapped),
 * and put the pool's spare empty skb on the producer slot so the ring
 * stays full.
 */
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	/* debug-only bookkeeping of which TPA bins are in use */
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
1301
/* Attach the SGE pages listed in the CQE's SGL to the aggregated skb
 * as page fragments.  Each consumed page is replaced in the ring via
 * bnx2x_alloc_rx_sge(); on replacement failure the walk stops and the
 * error is returned so the caller drops the packet.  Returns 0 on
 * success, -ENOMEM/-EINVAL on failure.
 */
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	/* bytes carried by the SGEs = total packet minus the BD part */
	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					   max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
1367
/* Complete a TPA aggregation: take the accumulated skb out of the TPA
 * pool, fix up its IP checksum, attach the SGE page fragments and hand
 * it to the stack; a freshly allocated skb refills the pool bin.  If
 * that allocation fails the aggregated packet is dropped and the old
 * buffer stays in the bin.  Either way the bin returns to TPA_STOP.
 */
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			/* recompute the IP header checksum for the
			 * aggregated packet */
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
1457
/* Publish new Rx BD, CQE and SGE producer values to the FW by writing
 * them into the USTORM internal memory for this port/client.  Called at
 * the end of an Rx pass so the FW may reuse the buffers just posted.
 */
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	/* Copy the producer block to the chip one 32-bit word at a time */
	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
1492
/* Process up to @budget Rx completions on fastpath @fp (NAPI poll body).
 *
 * Walks the Rx completion queue (RCQ) from the software consumer up to the
 * HW consumer read from the status block.  Each CQE is either a slowpath
 * event, a TPA (LRO) start/stop, or a regular packet.  Regular packets are
 * either copied (small packet, jumbo MTU) or handed to the stack directly
 * with a replacement buffer posted in their place.
 *
 * Returns the number of packets processed (<= budget).
 */
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	/* Snapshot ring indices; bd_prod_fw tracks the value reported to FW */
	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					/* Buffer moves into the TPA pool;
					   no packet is delivered yet */
					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			/* Sync only the header area the CPU will touch */
			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				/* Original buffer stays in the ring */
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				/* Replacement posted - hand the original
				   buffer to the stack */
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				/* Recycle the buffer back into the ring;
				   the packet is dropped */
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Trust the HW checksum only when enabled and OK */
			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	/* Commit the new ring positions */
	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
1726
/* MSI-X fastpath interrupt handler.  Acks the status block (disabling
 * further IGU interrupts for this SB) and schedules NAPI polling for the
 * associated queue.  Actual Rx/Tx work happens in the poll routine.
 */
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->u_status_block.status_block_index);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
1756
/* Single-vector (INTx/MSI) interrupt handler.  Reads the aggregated
 * interrupt status, schedules NAPI for every fastpath queue whose status
 * bit is set, forwards CNIC events when compiled in, and queues the
 * slowpath task for bit 0.  Remaining unexpected bits are only logged.
 */
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Each fastpath SB owns one bit of the status word (above bit 0) */
	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	/* Deliver the event to the registered CNIC handler, if any */
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* Bit 0 indicates a slowpath event - handled in process context */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
1828
1829/* end of fast path */
1830
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001831static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001832
1833/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001834
1835/*
1836 * General service functions
1837 */
1838
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001839static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001840{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001841 u32 lock_status;
1842 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001843 int func = BP_FUNC(bp);
1844 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001845 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001846
1847 /* Validating that the resource is within range */
1848 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1849 DP(NETIF_MSG_HW,
1850 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1851 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1852 return -EINVAL;
1853 }
1854
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001855 if (func <= 5) {
1856 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1857 } else {
1858 hw_lock_control_reg =
1859 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1860 }
1861
Eliezer Tamirf1410642008-02-28 11:51:50 -08001862 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001863 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001864 if (lock_status & resource_bit) {
1865 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1866 lock_status, resource_bit);
1867 return -EEXIST;
1868 }
1869
Eilon Greenstein46230472008-08-25 15:23:30 -07001870 /* Try for 5 second every 5ms */
1871 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001872 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001873 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1874 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001875 if (lock_status & resource_bit)
1876 return 0;
1877
1878 msleep(5);
1879 }
1880 DP(NETIF_MSG_HW, "Timeout\n");
1881 return -EAGAIN;
1882}
1883
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001884static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001885{
1886 u32 lock_status;
1887 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001888 int func = BP_FUNC(bp);
1889 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001890
1891 /* Validating that the resource is within range */
1892 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1893 DP(NETIF_MSG_HW,
1894 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1895 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1896 return -EINVAL;
1897 }
1898
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001899 if (func <= 5) {
1900 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1901 } else {
1902 hw_lock_control_reg =
1903 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1904 }
1905
Eliezer Tamirf1410642008-02-28 11:51:50 -08001906 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001907 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001908 if (!(lock_status & resource_bit)) {
1909 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1910 lock_status, resource_bit);
1911 return -EFAULT;
1912 }
1913
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001914 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001915 return 0;
1916}
1917
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	/* Serialize PHY access between the two ports of this device */
	mutex_lock(&bp->port.phy_mutex);

	/* Also take the chip-level MDIO lock when the board requires it */
	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}
1926
/* Counterpart of bnx2x_acquire_phy_lock(); releases in reverse order */
static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
1934
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001935int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1936{
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1943 u32 gpio_reg;
1944 int value;
1945
1946 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1947 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1948 return -EINVAL;
1949 }
1950
1951 /* read GPIO value */
1952 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1953
1954 /* get the requested pin value */
1955 if ((gpio_reg & gpio_mask) == gpio_mask)
1956 value = 1;
1957 else
1958 value = 0;
1959
1960 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1961
1962 return value;
1963}
1964
/* Drive GPIO pin @gpio_num (as seen by @port) into @mode: output low,
 * output high, or high-impedance input.  Access to the shared GPIO
 * register is serialized via the GPIO HW lock.
 *
 * Returns 0 on success or -EINVAL for an invalid pin; unknown modes
 * leave the pin configuration unchanged.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
2017
/* Configure the interrupt output state of GPIO pin @gpio_num (as seen by
 * @port) via the GPIO_INT register: clear or set the interrupt output.
 * Serialized via the GPIO HW lock.
 *
 * Returns 0 on success or -EINVAL for an invalid pin; unknown modes
 * leave the configuration unchanged.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
2063
/* Drive shared SPIO pin @spio_num into @mode: output low, output high,
 * or high-impedance input.  Only pins SPIO_4..SPIO_7 are accepted.
 * Serialized via the SPIO HW lock.
 *
 * Returns 0 on success or -EINVAL for an invalid pin; unknown modes
 * leave the pin configuration unchanged.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
2109
/* Translate the negotiated IEEE pause capability bits (link_vars.ieee_fc)
 * into the ethtool ADVERTISED_Pause/ADVERTISED_Asym_Pause flags kept in
 * bp->port.advertising.
 */
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		/* No pause advertised - clear both flags */
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		/* Symmetric and asymmetric pause */
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		/* Asymmetric pause only */
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		/* Treat anything else as no pause support */
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
2134
/* Report the current link state: update the net-device carrier and print
 * a single human-readable log line with speed, duplex and flow control.
 * A function disabled in multi-function mode always reports link down.
 */
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		/* In multi-function mode cap the reported speed at this
		   function's configured maximum bandwidth */
		line_speed = bp->link_vars.line_speed;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
		printk("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		/* Append the negotiated flow-control directions, if any */
		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
2185
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00002186static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002187{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002188 if (!BP_NOMCP(bp)) {
2189 u8 rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002190
Eilon Greenstein19680c42008-08-13 15:47:33 -07002191 /* Initialize link parameters structure variables */
Yaniv Rosner8c99e7b2008-08-13 15:56:17 -07002192 /* It is recommended to turn off RX FC for jumbo frames
2193 for better performance */
Eilon Greenstein0c593272009-08-12 08:22:13 +00002194 if (bp->dev->mtu > 5000)
David S. Millerc0700f92008-12-16 23:53:20 -08002195 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
Yaniv Rosner8c99e7b2008-08-13 15:56:17 -07002196 else
David S. Millerc0700f92008-12-16 23:53:20 -08002197 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002198
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002199 bnx2x_acquire_phy_lock(bp);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00002200
2201 if (load_mode == LOAD_DIAG)
2202 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2203
Eilon Greenstein19680c42008-08-13 15:47:33 -07002204 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00002205
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002206 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002207
Eilon Greenstein3c96c682009-01-14 21:25:31 -08002208 bnx2x_calc_fc_adv(bp);
2209
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00002210 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2211 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002212 bnx2x_link_report(bp);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00002213 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002214
Eilon Greenstein19680c42008-08-13 15:47:33 -07002215 return rc;
2216 }
Eilon Greensteinf5372252009-02-12 08:38:30 +00002217 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
Eilon Greenstein19680c42008-08-13 15:47:33 -07002218 return -EINVAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002219}
2220
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002221static void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002222{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002223 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002224 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002225 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002226 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002227
Eilon Greenstein19680c42008-08-13 15:47:33 -07002228 bnx2x_calc_fc_adv(bp);
2229 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00002230 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002231}
2232
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002233static void bnx2x__link_reset(struct bnx2x *bp)
2234{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002235 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002236 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00002237 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002238 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002239 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00002240 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002241}
2242
2243static u8 bnx2x_link_test(struct bnx2x *bp)
2244{
2245 u8 rc;
2246
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002247 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002248 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002249 bnx2x_release_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002250
2251 return rc;
2252}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002253
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002254static void bnx2x_init_port_minmax(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002255{
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002256 u32 r_param = bp->link_vars.line_speed / 8;
2257 u32 fair_periodic_timeout_usec;
2258 u32 t_fair;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002259
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002260 memset(&(bp->cmng.rs_vars), 0,
2261 sizeof(struct rate_shaping_vars_per_port));
2262 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002263
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002264 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2265 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002266
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002267 /* this is the threshold below which no timer arming will occur
2268 1.25 coefficient is for the threshold to be a little bigger
2269 than the real time, to compensate for timer in-accuracy */
2270 bp->cmng.rs_vars.rs_threshold =
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002271 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2272
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002273 /* resolution of fairness timer */
2274 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2275 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2276 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002277
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002278 /* this is the threshold below which we won't arm the timer anymore */
2279 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002280
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002281 /* we multiply by 1e3/8 to get bytes/msec.
2282 We don't want the credits to pass a credit
2283 of the t_fair*FAIR_MEM (algorithm resolution) */
2284 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2285 /* since each tick is 4 usec */
2286 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002287}
2288
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	/* Walk every E1H virtual function (VN) that shares this port;
	 * function id = 2*vn + port */
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeroes - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
2334
/* Program the per-VN rate-shaping and fairness contexts for function
 * 'func' into XSTORM internal memory, based on the min/max bandwidth
 * taken from the multi-function configuration in shmem.
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		/* Bandwidth fields are in 100 Mbps units */
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	/* vn_weight_sum is zero when fairness is disabled; skip the
	 * credit computation in that case */
	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory (word-by-word register writes) */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
2396
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002397
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control: tell the USTORM firmware whether
		 * TX pause is active so it can avoid dropping packets */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	/* In multi-function mode, propagate the change to the sibling
	 * functions and re-program the bandwidth arbitration contexts */
	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
2469
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002470static void bnx2x__link_status_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002471{
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002472 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002473 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002474
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002475 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2476
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002477 if (bp->link_vars.link_up)
2478 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2479 else
2480 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2481
Eilon Greenstein2691d512009-08-12 08:22:08 +00002482 bnx2x_calc_vn_weight_sum(bp);
2483
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002484 /* indicate link status */
2485 bnx2x_link_report(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002486}
2487
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002488static void bnx2x_pmf_update(struct bnx2x *bp)
2489{
2490 int port = BP_PORT(bp);
2491 u32 val;
2492
2493 bp->port.pmf = 1;
2494 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2495
2496 /* enable nig attention */
2497 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2498 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2499 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002500
2501 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002502}
2503
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002504/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002505
2506/* slow path */
2507
2508/*
2509 * General service functions
2510 */
2511
/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	/* NOTE(review): fw_seq is incremented before fw_mb_mutex is taken -
	 * presumably callers are otherwise serialized; verify */
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	/* slow emulation chips need a longer polling interval */
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do it's magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2550
2551static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
Michael Chane665bfd2009-10-10 13:46:54 +00002552static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002553static void bnx2x_set_rx_mode(struct net_device *dev);
2554
2555static void bnx2x_e1h_disable(struct bnx2x *bp)
2556{
2557 int port = BP_PORT(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002558
2559 netif_tx_disable(bp->dev);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002560
2561 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2562
Eilon Greenstein2691d512009-08-12 08:22:08 +00002563 netif_carrier_off(bp->dev);
2564}
2565
2566static void bnx2x_e1h_enable(struct bnx2x *bp)
2567{
2568 int port = BP_PORT(bp);
2569
2570 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2571
Eilon Greenstein2691d512009-08-12 08:22:08 +00002572 /* Tx queue should be only reenabled */
2573 netif_tx_wake_all_queues(bp->dev);
2574
Eilon Greenstein061bc702009-10-15 00:18:47 -07002575 /*
2576 * Should not call netif_carrier_on since it will be called if the link
2577 * is up when checking for link state
2578 */
Eilon Greenstein2691d512009-08-12 08:22:08 +00002579}
2580
2581static void bnx2x_update_min_max(struct bnx2x *bp)
2582{
2583 int port = BP_PORT(bp);
2584 int vn, i;
2585
2586 /* Init rate shaping and fairness contexts */
2587 bnx2x_init_port_minmax(bp);
2588
2589 bnx2x_calc_vn_weight_sum(bp);
2590
2591 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2592 bnx2x_init_vn_minmax(bp, 2*vn + port);
2593
2594 if (bp->port.pmf) {
2595 int func;
2596
2597 /* Set the attention towards other drivers on the same port */
2598 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2599 if (vn == BP_E1HVN(bp))
2600 continue;
2601
2602 func = ((vn << 1) | port);
2603 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2604 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2605 }
2606
2607 /* Store it to internal memory */
2608 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2609 REG_WR(bp, BAR_XSTRORM_INTMEM +
2610 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2611 ((u32 *)(&bp->cmng))[i]);
2612 }
2613}
2614
/* Handle a DCC event signalled by the MCP: enable/disable this PF
 * and/or re-apply bandwidth allocation, then acknowledge to the MCP.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		/* mark this sub-event as handled */
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP: any bit still set means an unsupported
	 * sub-event was requested */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}
2651
Michael Chan28912902009-10-10 13:46:53 +00002652/* must be called under the spq lock */
2653static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2654{
2655 struct eth_spe *next_spe = bp->spq_prod_bd;
2656
2657 if (bp->spq_prod_bd == bp->spq_last_bd) {
2658 bp->spq_prod_bd = bp->spq;
2659 bp->spq_prod_idx = 0;
2660 DP(NETIF_MSG_TIMER, "end of spq\n");
2661 } else {
2662 bp->spq_prod_bd++;
2663 bp->spq_prod_idx++;
2664 }
2665 return next_spe;
2666}
2667
/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	/* publish the new producer index to XSTORM */
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	/* order the MMIO write before the subsequent spinlock release */
	mmiowb();
}
2680
/* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post a slow-path (ramrod) command on the SPQ.
 * Returns 0 on success, -EBUSY when the ring is full, -EIO on panic
 * (when BNX2X_STOP_ON_ERROR is set).
 */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	/* a full SPQ here is a driver bug - panic */
	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded int it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	/* 'common' ramrods are not tied to a single connection */
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2727
2728/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002729static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002730{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002731 u32 i, j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002732 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002733
2734 might_sleep();
2735 i = 100;
2736 for (j = 0; j < i*10; j++) {
2737 val = (1UL << 31);
2738 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2739 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2740 if (val & (1L << 31))
2741 break;
2742
2743 msleep(5);
2744 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002745 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002746 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002747 rc = -EBUSY;
2748 }
2749
2750 return rc;
2751}
2752
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002753/* release split MCP access lock register */
2754static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002755{
2756 u32 val = 0;
2757
2758 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2759}
2760
2761static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2762{
2763 struct host_def_status_block *def_sb = bp->def_status_blk;
2764 u16 rc = 0;
2765
2766 barrier(); /* status block is written to by the chip */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002767 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2768 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2769 rc |= 1;
2770 }
2771 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2772 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2773 rc |= 2;
2774 }
2775 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2776 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2777 rc |= 4;
2778 }
2779 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2780 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2781 rc |= 8;
2782 }
2783 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2784 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2785 rc |= 16;
2786 }
2787 return rc;
2788}
2789
2790/*
2791 * slow path service functions
2792 */
2793
2794static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2795{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002796 int port = BP_PORT(bp);
Eilon Greenstein5c862842008-08-13 15:51:48 -07002797 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2798 COMMAND_REG_ATTN_BITS_SET);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002799 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2800 MISC_REG_AEU_MASK_ATTN_FUNC_0;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002801 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2802 NIG_REG_MASK_INTERRUPT_PORT0;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002803 u32 aeu_mask;
Eilon Greenstein87942b42009-02-12 08:36:49 +00002804 u32 nig_mask = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002805
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002806 if (bp->attn_state & asserted)
2807 BNX2X_ERR("IGU ERROR\n");
2808
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002809 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2810 aeu_mask = REG_RD(bp, aeu_addr);
2811
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002812 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002813 aeu_mask, asserted);
2814 aeu_mask &= ~(asserted & 0xff);
2815 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002816
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002817 REG_WR(bp, aeu_addr, aeu_mask);
2818 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002819
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002820 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002821 bp->attn_state |= asserted;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002822 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002823
2824 if (asserted & ATTN_HARD_WIRED_MASK) {
2825 if (asserted & ATTN_NIG_FOR_FUNC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002826
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002827 bnx2x_acquire_phy_lock(bp);
2828
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002829 /* save nig interrupt mask */
Eilon Greenstein87942b42009-02-12 08:36:49 +00002830 nig_mask = REG_RD(bp, nig_int_mask_addr);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002831 REG_WR(bp, nig_int_mask_addr, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002832
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002833 bnx2x_link_attn(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002834
2835 /* handle unicore attn? */
2836 }
2837 if (asserted & ATTN_SW_TIMER_4_FUNC)
2838 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2839
2840 if (asserted & GPIO_2_FUNC)
2841 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2842
2843 if (asserted & GPIO_3_FUNC)
2844 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2845
2846 if (asserted & GPIO_4_FUNC)
2847 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2848
2849 if (port == 0) {
2850 if (asserted & ATTN_GENERAL_ATTN_1) {
2851 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2852 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2853 }
2854 if (asserted & ATTN_GENERAL_ATTN_2) {
2855 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2856 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2857 }
2858 if (asserted & ATTN_GENERAL_ATTN_3) {
2859 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2860 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2861 }
2862 } else {
2863 if (asserted & ATTN_GENERAL_ATTN_4) {
2864 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2865 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2866 }
2867 if (asserted & ATTN_GENERAL_ATTN_5) {
2868 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2869 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2870 }
2871 if (asserted & ATTN_GENERAL_ATTN_6) {
2872 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2873 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2874 }
2875 }
2876
2877 } /* if hardwired */
2878
Eilon Greenstein5c862842008-08-13 15:51:48 -07002879 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2880 asserted, hc_addr);
2881 REG_WR(bp, hc_addr, asserted);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002882
2883 /* now set back the mask */
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002884 if (asserted & ATTN_NIG_FOR_FUNC) {
Eilon Greenstein87942b42009-02-12 08:36:49 +00002885 REG_WR(bp, nig_int_mask_addr, nig_mask);
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002886 bnx2x_release_phy_lock(bp);
2887 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002888}
2889
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002890static inline void bnx2x_fan_failure(struct bnx2x *bp)
2891{
2892 int port = BP_PORT(bp);
2893
2894 /* mark the failure */
2895 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2896 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2897 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2898 bp->link_params.ext_phy_config);
2899
2900 /* log the failure */
2901 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2902 " the driver to shutdown the card to prevent permanent"
2903 " damage. Please contact Dell Support for assistance\n",
2904 bp->dev->name);
2905}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002906
/* Service "deassert" HW attentions routed through AEU input group 0:
 * SPIO5 (fan failure), GPIO3 (module detection) and the fatal HW block
 * interrupts of set 0.
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* mask SPIO5 in the AEU enable register so it cannot
		 * re-fire */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		/* module detection interrupt - handled under the PHY lock */
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the fatal bits, log, and stop the driver */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
2970
/* Service "deassert" HW attentions routed through AEU input group 1:
 * doorbell queue (DORQ) errors and the fatal HW block interrupts of set 1.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* presumably read-to-clear (per the _CLR register suffix) -
		 * confirm against the register spec */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the offending bits in the AEU enable register */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
3001
/* Service "deassert" HW attentions routed through AEU input group 2:
 * CFC and PXP block errors and the fatal HW block interrupts of set 2.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the offending bits in the AEU enable register */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
3041
/* Service "deassert" attentions in group 3: general attentions (PMF/link
 * events, MC and MCP firmware asserts) and latched attentions (GRC
 * time-out / reserved signals).
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* clear this function's general attention bit */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* refresh the multi-function config from shmem */
			bp->mf_config = SHMEM_RD(bp,
					mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			/* take over as PMF if the MCP promoted us */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			/* microcode assert - clear the attentions and stop */
			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			/* management firmware assert - dump its trace */
			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* detail register only read on E1H chips */
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* clear the latched attention signals */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3096
/* Handle the "deassert" phase of an attention interrupt: snapshot the
 * after-invert attention signals, dispatch them per dynamic attention
 * group, ACK the deasserted bits at the HC and re-enable them in the
 * AEU mask, then update the driver's attention state.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			/* hand each handler only the signals routed to
			 * this group */
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	/* ACK the deasserted bits at the host coalescing block */
	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* the AEU mask register is shared - modify it under the HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3175
3176static void bnx2x_attn_int(struct bnx2x *bp)
3177{
3178 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003179 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3180 attn_bits);
3181 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3182 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003183 u32 attn_state = bp->attn_state;
3184
3185 /* look for changed bits */
3186 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3187 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3188
3189 DP(NETIF_MSG_HW,
3190 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3191 attn_bits, attn_ack, asserted, deasserted);
3192
3193 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003194 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003195
3196 /* handle bits that were raised */
3197 if (asserted)
3198 bnx2x_attn_int_asserted(bp, asserted);
3199
3200 if (deasserted)
3201 bnx2x_attn_int_deasserted(bp, deasserted);
3202}
3203
/* Slow-path work handler (runs on the bnx2x workqueue): processes the
 * default status block and re-ACKs every storm's index; only the final
 * ACK (TSTORM) re-enables the slow-path interrupt in the IGU.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;


	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/* if (status == 0) */
/*	BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* ACK all storms' default status block indices; the last call
	 * passes IGU_INT_ENABLE to re-arm the interrupt */
	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}
3238
/* Interrupt handler for the dedicated slow-path MSI-X vector: disables
 * further slow-path interrupts, lets CNIC peek at the event (when built
 * in), and defers the real work to bnx2x_sp_task() on the workqueue.
 */
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* mask slow-path interrupts until the work item re-enables them */
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* RCU guards cnic_ops against a concurrent unregister */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3272
3273/* end of slow path */
3274
3275/* Statistics */
3276
3277/****************************************************************************
3278* Macros
3279****************************************************************************/
3280
/* 64-bit statistics helpers.  Values are kept as {hi, lo} u32 pairs; the
 * UPDATE_* macros assume the conventional local names used by the stats
 * update functions (pstats, estats, qstats, new, old, *client, diff).
 */

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

/* mac_stx[0] holds the last raw HW reading of counter 's'; accumulate the
 * delta since then into mac_stx[1].t and remember the new raw value */
#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

/* accumulate the delta of NIG counter 's' (new vs. old) into estats->t */
#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

/* add the 32-bit HW counter 's' into the 64-bit mac_stx[1] accumulator */
#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

/* extend the 32-bit tstorm counter 's' into 64-bit qstats->t; the caller
 * must declare 'diff' */
#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* same, for the ustorm per-client counters */
#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* same, for the xstorm per-client counters */
#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

/* subtract the ustorm counter delta from 64-bit qstats->t */
#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3386
3387/*
3388 * General service functions
3389 */
3390
3391static inline long bnx2x_hilo(u32 *hiref)
3392{
3393 u32 lo = *(hiref + 1);
3394#if (BITS_PER_LONG == 64)
3395 u32 hi = *hiref;
3396
3397 return HILO_U64(hi, lo);
3398#else
3399 return lo;
3400#endif
3401}
3402
3403/*
3404 * Init service functions
3405 */
3406
/* Post a statistics query ramrod, unless one is already outstanding.
 * The ramrod carries a bitmap of every queue's client id (plus a
 * collect-port flag when this function is the PMF).  On success the
 * pending flag blocks further posts until the completion arrives.
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq -
			 * give back the one bnx2x_sp_post() consumed */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003428
/* Start the DMAE commands prepared in the slowpath buffer.  When executer
 * commands exist, build a "loader" command that copies the first prepared
 * command into the DMAE command memory and kicks it off (its completion
 * address is the next channel's go register, which apparently chains the
 * commands - confirm against the DMAE spec).  With no executer commands,
 * post the single function-stats command directly.
 */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* pre-mark completion; on slow (emulation) revs we skip the DMAE
	 * entirely so bnx2x_stats_comp() succeeds immediately */
	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		/* PCI -> GRC copy of dmae[0] into the DMAE command memory */
		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		/* NOTE(review): E1 apparently takes a one-dword-shorter
		 * command - confirm */
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
3476
3477static int bnx2x_stats_comp(struct bnx2x *bp)
3478{
3479 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3480 int cnt = 10;
3481
3482 might_sleep();
3483 while (*stats_comp != DMAE_COMP_VAL) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003484 if (!cnt) {
3485 BNX2X_ERR("timeout waiting for stats finished\n");
3486 break;
3487 }
3488 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -07003489 msleep(1);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003490 }
3491 return 1;
3492}
3493
3494/*
3495 * Statistics service functions
3496 */
3497
/* Pull the accumulated port statistics from shmem into the local
 * port_stats buffer using two chained DMAE reads (the area exceeds a
 * single read's DMAE_LEN32_RD_MAX limit).  Run when this function takes
 * over as port management function (PMF); blocks until the DMAE
 * completes.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* GRC -> PCI read, swap mode chosen by host endianness */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	/* first chunk: DMAE_LEN32_RD_MAX dwords, completion to GRC */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second chunk: the remainder, completion to PCI so that
	 * bnx2x_stats_comp() can poll stats_comp */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3552
3553static void bnx2x_port_stats_init(struct bnx2x *bp)
3554{
3555 struct dmae_command *dmae;
3556 int port = BP_PORT(bp);
3557 int vn = BP_E1HVN(bp);
3558 u32 opcode;
3559 int loader_idx = PMF_DMAE_C(bp);
3560 u32 mac_addr;
3561 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3562
3563 /* sanity */
3564 if (!bp->link_vars.link_up || !bp->port.pmf) {
3565 BNX2X_ERR("BUG!\n");
3566 return;
3567 }
3568
3569 bp->executer_idx = 0;
3570
3571 /* MCP */
3572 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3573 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3574 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3575#ifdef __BIG_ENDIAN
3576 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3577#else
3578 DMAE_CMD_ENDIANITY_DW_SWAP |
3579#endif
3580 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3581 (vn << DMAE_CMD_E1HVN_SHIFT));
3582
3583 if (bp->port.port_stx) {
3584
3585 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3586 dmae->opcode = opcode;
3587 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3588 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3589 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3590 dmae->dst_addr_hi = 0;
3591 dmae->len = sizeof(struct host_port_stats) >> 2;
3592 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3593 dmae->comp_addr_hi = 0;
3594 dmae->comp_val = 1;
3595 }
3596
3597 if (bp->func_stx) {
3598
3599 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3600 dmae->opcode = opcode;
3601 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3602 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3603 dmae->dst_addr_lo = bp->func_stx >> 2;
3604 dmae->dst_addr_hi = 0;
3605 dmae->len = sizeof(struct host_func_stats) >> 2;
3606 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3607 dmae->comp_addr_hi = 0;
3608 dmae->comp_val = 1;
3609 }
3610
3611 /* MAC */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003612 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3613 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3614 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3615#ifdef __BIG_ENDIAN
3616 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3617#else
3618 DMAE_CMD_ENDIANITY_DW_SWAP |
3619#endif
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003620 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3621 (vn << DMAE_CMD_E1HVN_SHIFT));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003622
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07003623 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003624
3625 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3626 NIG_REG_INGRESS_BMAC0_MEM);
3627
3628 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3629 BIGMAC_REGISTER_TX_STAT_GTBYT */
3630 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3631 dmae->opcode = opcode;
3632 dmae->src_addr_lo = (mac_addr +
3633 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3634 dmae->src_addr_hi = 0;
3635 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3636 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3637 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3638 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3639 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3640 dmae->comp_addr_hi = 0;
3641 dmae->comp_val = 1;
3642
3643 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3644 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3645 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3646 dmae->opcode = opcode;
3647 dmae->src_addr_lo = (mac_addr +
3648 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3649 dmae->src_addr_hi = 0;
3650 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003651 offsetof(struct bmac_stats, rx_stat_gr64_lo));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003652 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003653 offsetof(struct bmac_stats, rx_stat_gr64_lo));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003654 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3655 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3656 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657 dmae->comp_addr_hi = 0;
3658 dmae->comp_val = 1;
3659
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07003660 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003661
3662 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3663
3664 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3665 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3666 dmae->opcode = opcode;
3667 dmae->src_addr_lo = (mac_addr +
3668 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3669 dmae->src_addr_hi = 0;
3670 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3671 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3672 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3673 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3674 dmae->comp_addr_hi = 0;
3675 dmae->comp_val = 1;
3676
3677 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3678 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3679 dmae->opcode = opcode;
3680 dmae->src_addr_lo = (mac_addr +
3681 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3682 dmae->src_addr_hi = 0;
3683 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003684 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003685 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003686 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003687 dmae->len = 1;
3688 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3689 dmae->comp_addr_hi = 0;
3690 dmae->comp_val = 1;
3691
3692 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3693 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3694 dmae->opcode = opcode;
3695 dmae->src_addr_lo = (mac_addr +
3696 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3697 dmae->src_addr_hi = 0;
3698 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003699 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003700 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003701 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003702 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3703 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3704 dmae->comp_addr_hi = 0;
3705 dmae->comp_val = 1;
3706 }
3707
3708 /* NIG */
3709 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003710 dmae->opcode = opcode;
3711 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3712 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3713 dmae->src_addr_hi = 0;
3714 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3715 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3716 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3717 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3718 dmae->comp_addr_hi = 0;
3719 dmae->comp_val = 1;
3720
3721 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3722 dmae->opcode = opcode;
3723 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3724 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3725 dmae->src_addr_hi = 0;
3726 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3727 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3728 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3729 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3730 dmae->len = (2*sizeof(u32)) >> 2;
3731 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3732 dmae->comp_addr_hi = 0;
3733 dmae->comp_val = 1;
3734
3735 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003736 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3737 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3738 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3739#ifdef __BIG_ENDIAN
3740 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3741#else
3742 DMAE_CMD_ENDIANITY_DW_SWAP |
3743#endif
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003744 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3745 (vn << DMAE_CMD_E1HVN_SHIFT));
3746 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3747 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003748 dmae->src_addr_hi = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003749 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3750 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3751 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3752 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3753 dmae->len = (2*sizeof(u32)) >> 2;
3754 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3755 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3756 dmae->comp_val = DMAE_COMP_VAL;
3757
3758 *stats_comp = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003759}
3760
/* Program the per-function statistics DMAE command.
 *
 * Builds a single DMAE descriptor (bp->stats_dmae) that copies the host
 * function-statistics block (func_stats in the slowpath area) out to the
 * device address held in bp->func_stx.  Used by non-PMF functions, which
 * own only function-level (not port-level) statistics.
 *
 * @bp: driver instance
 *
 * Side effects: resets bp->executer_idx, overwrites bp->stats_dmae and
 * clears the stats completion word so a later post can detect completion.
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		/* no device-side function-stats address - nothing to program */
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* PCI -> GRC copy; completion written to PCI (host) memory.
	   Endianity and port/vn bits select the proper byte-swap mode and
	   DMAE channel for this function. */
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	/* GRC addresses are dword-based, hence the >> 2 */
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* arm the completion word; DMAE writes DMAE_COMP_VAL when done */
	*stats_comp = 0;
}
3796
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003797static void bnx2x_stats_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003798{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003799 if (bp->port.pmf)
3800 bnx2x_port_stats_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003801
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003802 else if (bp->func_stx)
3803 bnx2x_func_stats_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003804
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003805 bnx2x_hw_stats_post(bp);
3806 bnx2x_storm_stats_post(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003807}
3808
/* Start statistics collection after this function has become the PMF.
 *
 * Order matters: wait for any in-flight DMAE completion first, pull the
 * counters accumulated by the previous PMF, then (re)start collection.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
3815
/* Restart statistics collection: drain the pending DMAE completion,
 * then re-program and re-post the collection commands. */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003821
/* Fold the freshly DMAE'd BigMAC hardware counters into the host port
 * statistics, and derive the ethtool pause-frame counters.
 *
 * NOTE(review): UPDATE_STAT64 is a driver macro (defined elsewhere in
 * bnx2x) that appears to expand in terms of the surrounding locals
 * ('new', 'pstats' and the 'diff' scratch pair) - 'diff' looks unused
 * here but is presumably referenced inside the macro expansion; confirm
 * before removing any of these locals.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch 64-bit delta used by the UPDATE_STAT64 macro */
	struct {
		u32 lo;
		u32 hi;
	} diff;

	/* map BMAC register names (gr*/gt*) onto the generic mac_stx names */
	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	/* the same BMAC counter feeds both xoff views */
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	/* ethtool pause counters are taken from the accumulated mac_stx[1] */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
3872
/* Fold the freshly DMAE'd EMAC hardware counters into the host port
 * statistics, and derive the ethtool pause-frame counters (xon + xoff).
 *
 * NOTE(review): UPDATE_EXTEND_STAT is a driver macro (defined elsewhere)
 * that presumably extends the 32-bit EMAC counter named by its argument
 * into the matching 64-bit hi/lo pair, using the surrounding 'new' and
 * 'pstats' locals - confirm against the macro definition before
 * renaming either local.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* unlike the BMAC, the EMAC counts xon and xoff pause frames
	   separately, so the ethtool totals are the 64-bit sum of both */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
3929
/* Process the hardware (DMAE-collected) statistics after completion.
 *
 * Dispatches to the BMAC or EMAC update routine depending on which MAC is
 * active, then folds the NIG block counters into the port statistics and
 * mirrors the accumulated mac_stx[1] values into bp->eth_stats.
 *
 * Returns 0 on success, -1 if no MAC is active (should not happen when
 * stats were collected by DMAE).
 *
 * NOTE(review): ADD_EXTEND_64 and UPDATE_STAT64_NIG are driver macros
 * (defined elsewhere); UPDATE_STAT64_NIG presumably uses the surrounding
 * 'new'/'old'/'estats'/'diff' locals - confirm before renaming them.
 * Ordering matters: the old->new deltas must be consumed before the
 * memcpy that snapshots 'new' into 'old'.
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch 64-bit delta used by the UPDATE_STAT64_NIG macro */
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	/* accumulate NIG deltas (new - old snapshot) into 64-bit counters */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	/* snapshot the raw NIG counters for the next delta computation */
	memcpy(old, new, sizeof(struct nig_stats));

	/* mirror the accumulated MAC statistics into the ethtool view */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	/* matching start/end marks the snapshot as consistent for readers */
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	/* report (once per change) the worst-case NIG timer seen by MCP */
	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
3979
/* Aggregate the firmware ("storm") per-client statistics into per-queue,
 * per-function and ethtool counters.
 *
 * For every queue: validate that all three storms (x/t/u) have produced a
 * fresh snapshot (their counter must equal bp->stats_counter - 1 modulo
 * 16 bits), build the queue byte/packet totals from the little-endian
 * firmware fields, and fold them into the function totals (fstats) and
 * the global ethtool totals (estats).
 *
 * Returns 0 on success; a negative value (-1/-2/-4 identifies the stale
 * storm) if any storm snapshot is not yet up to date, in which case the
 * partially-updated totals are discarded by the caller retrying.
 *
 * NOTE(review): UPDATE_EXTEND_TSTAT/USTAT/XSTAT, SUB_EXTEND_USTAT and
 * ADD_64 are driver macros defined elsewhere; they presumably expand in
 * terms of the loop locals (tclient/old_tclient, uclient/old_uclient,
 * xclient/old_xclient, qstats, diff) - confirm before renaming any of
 * them.  'diff' looks unused but is likely consumed inside the macros.
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	/* restart function totals from the saved base (skipping the
	   start/end marker words at the front of host_func_stats) */
	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	/* these ethtool counters are re-accumulated from scratch below */
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		/* rx bytes = broadcast + multicast + unicast (64-bit sum) */
		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		/* "valid" excludes error bytes, which are added just below */
		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		/* extend 32-bit tstorm packet counters to 64 bits */
		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		/* no-buffer drops were counted as received by tstorm;
		   subtract them from the received totals and account them
		   as discards instead */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* tx bytes = unicast + multicast + broadcast (64-bit sum) */
		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		/* raw snapshots consumed later by bnx2x_net_stats_update */
		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		/* fold this queue into the function totals */
		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		/* and into the global ethtool counters */
		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	/* bad octets seen by the MAC also count as received bytes */
	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* publish the function totals into the ethtool structure
	   (same skip of the two marker words as at the top) */
	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	/* merge MAC-level error counters into the aggregated values */
	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* port-wide tstorm discards are only valid on the PMF */
	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	/* matching start/end marks the snapshot as consistent */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
4194
/* Map the driver's aggregated ethtool statistics (bp->eth_stats) onto the
 * generic netdev statistics (bp->dev->stats) reported to the stack.
 *
 * bnx2x_hilo (defined elsewhere) presumably collapses a hi/lo 64-bit
 * counter pair into a single value - confirm against its definition.
 * Must run after bnx2x_storm_stats_update() so estats and the per-queue
 * old_tclient snapshots are current.
 */
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	/* dropped = MAC discards + per-queue checksum discards */
	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	/* rx_errors is the sum of all the individual rx error classes */
	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	/* not tracked by this hardware */
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
4260
4261static void bnx2x_drv_stats_update(struct bnx2x *bp)
4262{
4263 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4264 int i;
4265
4266 estats->driver_xoff = 0;
4267 estats->rx_err_discard_pkt = 0;
4268 estats->rx_skb_alloc_failed = 0;
4269 estats->hw_csum_err = 0;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004270 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +00004271 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4272
4273 estats->driver_xoff += qstats->driver_xoff;
4274 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4275 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4276 estats->hw_csum_err += qstats->hw_csum_err;
4277 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004278}
4279
/* Periodic statistics update entry point.
 *
 * Runs once per stats cycle: if the previous DMAE transfer has completed
 * (completion word equals DMAE_COMP_VAL), process the hardware stats (PMF
 * only), fold in the storm/firmware stats, refresh the netdev and driver
 * counters, optionally dump a debug snapshot, and post the next round.
 *
 * If the storm stats are stale for 4 consecutive cycles (stats_pending
 * reaches 3 before being reset), the firmware is assumed dead and the
 * driver panics.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* previous DMAE transfer not finished yet - try again next cycle */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* non-zero return means a storm snapshot was stale; escalate after
	   repeated failures */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* verbose periodic dump, gated by the NETIF_MSG_TIMER msglevel bit;
	   fp0_rx/fp0_tx both alias queue 0 */
	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = bp->fp;
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
				  " tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
				  " rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %lu no_buff_discard %lu "
			"mac_discard %u mac_filter_discard %u "
			"xxovrflow_discard %u brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		/* per-queue tx/rx packet and rx-call counts */
		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	/* post the next collection round */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004347
/* Build the DMAE command(s) that copy the final port (and, if present,
 * function) statistics from host memory out to the management firmware's
 * shared-memory area before statistics collection is disabled.
 *
 * Only constructs the commands in the slowpath DMAE array; the caller
 * (bnx2x_stats_stop -> bnx2x_hw_stats_post) actually posts them.
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* Common opcode bits: PCI -> GRC copy with per-port / per-vn
	 * encoding and endianness swap selected at compile time.
	 */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* If a function-stats command follows, complete this one
		 * to GRC so it chains via the DMAE loader; otherwise
		 * complete straight to the PCI stats_comp word.
		 */
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			/* chained completion: kick the loader channel */
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			/* clear the completion word before posting */
			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
4411
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004412static void bnx2x_stats_stop(struct bnx2x *bp)
4413{
4414 int update = 0;
4415
4416 bnx2x_stats_comp(bp);
4417
4418 if (bp->port.pmf)
4419 update = (bnx2x_hw_stats_update(bp) == 0);
4420
4421 update |= (bnx2x_storm_stats_update(bp) == 0);
4422
4423 if (update) {
4424 bnx2x_net_stats_update(bp);
4425
4426 if (bp->port.pmf)
4427 bnx2x_port_stats_stop(bp);
4428
4429 bnx2x_hw_stats_post(bp);
4430 bnx2x_stats_comp(bp);
4431 }
4432}
4433
/* No-op action for (state, event) pairs in bnx2x_stats_stm that require
 * no work (e.g. UPDATE/STOP while statistics are disabled).
 */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
4437
/* Statistics state machine: indexed by [current state][event]; each entry
 * names the action to run and the state to transition to.  Dispatched
 * exclusively from bnx2x_stats_handle().
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event */
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
4456
/* Drive the statistics state machine: run the action registered for the
 * (current state, event) pair and advance to the table's next state.
 */
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	/* UPDATE events fire every timer tick; only log them when the
	 * timer debug level is enabled to avoid flooding the log.
	 */
	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
4471
/* Write an initial (zeroed) host_port_stats image out to the management
 * firmware's port statistics area via a single synchronous DMAE transfer.
 * Must only be called on the PMF with a valid port_stx address.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* single PCI -> GRC copy, completion written to stats_comp */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* post and wait for completion before returning */
	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4509
/* As PMF, initialize the per-function statistics area in shared memory
 * for every vn (virtual NIC) on this port, not just our own, by
 * temporarily retargeting bp->func_stx at each function's area.
 */
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		/* function numbering interleaves vns across the two ports */
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}
4538
/* Non-PMF path: read back (GRC -> PCI) the function statistics base the
 * PMF wrote into shared memory, so that subsequent updates can be
 * accumulated on top of it.  Synchronous: posts the DMAE and waits.
 */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* note direction: GRC (shmem) -> PCI (host), the reverse of the
	 * stats publishing commands
	 */
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4576
/* One-time statistics setup at load: discover the shmem statistics
 * addresses, snapshot the NIG baseline counters, zero all software
 * per-queue/per-device accumulators, and seed the shmem statistics
 * areas (PMF) or read back the PMF-written base (non-PMF).
 */
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		/* no management firmware - nowhere to publish stats */
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats: record the current NIG counters as the baseline so
	 * later updates report deltas from this point
	 */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}
4638
/* Periodic driver timer: services the poll-mode fastpath, exchanges the
 * driver<->MCP heartbeat pulse, and kicks a statistics update while the
 * device is up.  Re-arms itself at bp->current_interval.
 */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	/* interrupts are disabled - skip the work but keep the timer alive */
	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		/* poll mode: service queue 0 from the timer instead of IRQ */
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		/* advance and publish our heartbeat sequence number */
		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4687
4688/* end of Statistics */
4689
4690/* nic init */
4691
4692/*
4693 * nic init service functions
4694 */
4695
/* Zero the USTORM and CSTORM halves of a non-default status block in the
 * CSEM fast memory for the given port/sb_id pair.
 */
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}
4708
/* Program a fastpath status block: tell the chip where the host copy
 * lives (DMA address of @sb split into U/C sections), bind it to this
 * function, disable host coalescing on every index, and ack/enable the
 * IGU interrupt line for it.
 */
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	/* host address of the U section, low then high dword */
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	/* start with host coalescing disabled on all U indices */
	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4753
/* Zero this function's default status block images in the TSEM, CSEM
 * (U and C sections) and XSEM fast memories.
 */
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
4771
/* Program the default status block: cache the attention-group enable
 * masks, point the HC attention machinery at the host ATTN section, then
 * register each STORM section's host address with its respective
 * firmware processor and disable host coalescing on all indices.
 * Finishes by acking/enabling the IGU line for this sb.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	/* cache the 4 AEU enable signatures of every dynamic attn group;
	 * groups are laid out 0x10 apart, signatures 4 bytes apart
	 */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	/* host address the HC writes attention messages to */
	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4887
/* Push the configured rx/tx interrupt-coalescing intervals into each
 * queue's status-block timeout registers.  A tick value of 0 also sets
 * the corresponding HC_DISABLE flag, turning coalescing off for that
 * index.  The /(4 * BNX2X_BTR) converts usecs to hardware timeout units.
 */
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}
4917
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004918static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4919 struct bnx2x_fastpath *fp, int last)
4920{
4921 int i;
4922
4923 for (i = 0; i < last; i++) {
4924 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4925 struct sk_buff *skb = rx_buf->skb;
4926
4927 if (skb == NULL) {
4928 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4929 continue;
4930 }
4931
4932 if (fp->tpa_state[i] == BNX2X_TPA_START)
4933 pci_unmap_single(bp->pdev,
4934 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein356e2382009-02-12 08:38:32 +00004935 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004936
4937 dev_kfree_skb(skb);
4938 rx_buf->skb = NULL;
4939 }
4940}
4941
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004942static void bnx2x_init_rx_rings(struct bnx2x *bp)
4943{
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004944 int func = BP_FUNC(bp);
Eilon Greenstein32626232008-08-13 15:51:07 -07004945 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4946 ETH_MAX_AGGREGATION_QUEUES_E1H;
4947 u16 ring_prod, cqe_ring_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004948 int i, j;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004949
Eilon Greenstein87942b42009-02-12 08:36:49 +00004950 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
Eilon Greenstein0f008462009-02-12 08:36:18 +00004951 DP(NETIF_MSG_IFUP,
4952 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004953
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004954 if (bp->flags & TPA_ENABLE_FLAG) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004955
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004956 for_each_queue(bp, j) {
Eilon Greenstein32626232008-08-13 15:51:07 -07004957 struct bnx2x_fastpath *fp = &bp->fp[j];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004958
Eilon Greenstein32626232008-08-13 15:51:07 -07004959 for (i = 0; i < max_agg_queues; i++) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004960 fp->tpa_pool[i].skb =
4961 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4962 if (!fp->tpa_pool[i].skb) {
4963 BNX2X_ERR("Failed to allocate TPA "
4964 "skb pool for queue[%d] - "
4965 "disabling TPA on this "
4966 "queue!\n", j);
4967 bnx2x_free_tpa_pool(bp, fp, i);
4968 fp->disable_tpa = 1;
4969 break;
4970 }
4971 pci_unmap_addr_set((struct sw_rx_bd *)
4972 &bp->fp->tpa_pool[i],
4973 mapping, 0);
4974 fp->tpa_state[i] = BNX2X_TPA_STOP;
4975 }
4976 }
4977 }
4978
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004979 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004980 struct bnx2x_fastpath *fp = &bp->fp[j];
4981
4982 fp->rx_bd_cons = 0;
4983 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004984 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004985
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004986 /* "next page" elements initialization */
4987 /* SGE ring */
4988 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4989 struct eth_rx_sge *sge;
4990
4991 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4992 sge->addr_hi =
4993 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4994 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4995 sge->addr_lo =
4996 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4997 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4998 }
4999
5000 bnx2x_init_sge_ring_bit_mask(fp);
5001
5002 /* RX BD ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005003 for (i = 1; i <= NUM_RX_RINGS; i++) {
5004 struct eth_rx_bd *rx_bd;
5005
5006 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5007 rx_bd->addr_hi =
5008 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005009 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005010 rx_bd->addr_lo =
5011 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005012 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005013 }
5014
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005015 /* CQ ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005016 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5017 struct eth_rx_cqe_next_page *nextpg;
5018
5019 nextpg = (struct eth_rx_cqe_next_page *)
5020 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5021 nextpg->addr_hi =
5022 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005023 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005024 nextpg->addr_lo =
5025 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005026 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005027 }
5028
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005029 /* Allocate SGEs and initialize the ring elements */
5030 for (i = 0, ring_prod = 0;
5031 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005032
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005033 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5034 BNX2X_ERR("was only able to allocate "
5035 "%d rx sges\n", i);
5036 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5037 /* Cleanup already allocated elements */
5038 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
Eilon Greenstein32626232008-08-13 15:51:07 -07005039 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005040 fp->disable_tpa = 1;
5041 ring_prod = 0;
5042 break;
5043 }
5044 ring_prod = NEXT_SGE_IDX(ring_prod);
5045 }
5046 fp->rx_sge_prod = ring_prod;
5047
5048 /* Allocate BDs and initialize BD ring */
Yitchak Gertner66e855f2008-08-13 15:49:05 -07005049 fp->rx_comp_cons = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005050 cqe_ring_prod = ring_prod = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005051 for (i = 0; i < bp->rx_ring_size; i++) {
5052 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5053 BNX2X_ERR("was only able to allocate "
Eilon Greensteinde832a52009-02-12 08:36:33 +00005054 "%d rx skbs on queue[%d]\n", i, j);
5055 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005056 break;
5057 }
5058 ring_prod = NEXT_RX_IDX(ring_prod);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005059 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
Ilpo Järvinen53e5e962008-07-25 21:40:45 -07005060 WARN_ON(ring_prod <= i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005061 }
5062
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005063 fp->rx_bd_prod = ring_prod;
5064 /* must not have more available CQEs than BDs */
5065 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5066 cqe_ring_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005067 fp->rx_pkt = fp->rx_calls = 0;
5068
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005069 /* Warning!
5070 * this will generate an interrupt (to the TSTORM)
5071 * must only be done after chip is initialized
5072 */
5073 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5074 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005075 if (j != 0)
5076 continue;
5077
5078 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005079 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005080 U64_LO(fp->rx_comp_mapping));
5081 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005082 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005083 U64_HI(fp->rx_comp_mapping));
5084 }
5085}
5086
5087static void bnx2x_init_tx_ring(struct bnx2x *bp)
5088{
5089 int i, j;
5090
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005091 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005092 struct bnx2x_fastpath *fp = &bp->fp[j];
5093
5094 for (i = 1; i <= NUM_TX_RINGS; i++) {
Eilon Greensteinca003922009-08-12 22:53:28 -07005095 struct eth_tx_next_bd *tx_next_bd =
5096 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005097
Eilon Greensteinca003922009-08-12 22:53:28 -07005098 tx_next_bd->addr_hi =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005099 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005100 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eilon Greensteinca003922009-08-12 22:53:28 -07005101 tx_next_bd->addr_lo =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005102 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005103 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005104 }
5105
Eilon Greensteinca003922009-08-12 22:53:28 -07005106 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5107 fp->tx_db.data.zero_fill1 = 0;
5108 fp->tx_db.data.prod = 0;
5109
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005110 fp->tx_pkt_prod = 0;
5111 fp->tx_pkt_cons = 0;
5112 fp->tx_bd_prod = 0;
5113 fp->tx_bd_cons = 0;
5114 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5115 fp->tx_pkt = 0;
5116 }
5117}
5118
/* Initialize the slowpath queue (SPQ) used to post slowpath commands
 * (ramrods) to the chip, and publish its DMA base address and initial
 * producer index to the XSTORM internal memory of this function.
 */
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	/* The whole SPQ is free initially */
	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	/* Tell the chip where the SPQ page lives (low then high dword) */
	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	/* Publish the initial producer index */
	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
5140
/* Fill in the per-connection ETH context (in the slowpath area) for every
 * queue: the USTORM Rx-side part (status block binding, BD/SGE page bases,
 * buffer sizes, TPA configuration) and the CSTORM/XSTORM Tx-side part
 * (Tx CQ index, Tx BD page base, statistics binding).
 */
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		/* Bind the client to its status block and enable stats */
		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		/* Rx buffer size and BD ring base address */
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			/* TPA: also program the SGE ring and aggregation
			 * limits for this client
			 */
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			/* SGE buffer size is clamped to the 16-bit field */
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			/* Max SGEs needed for an MTU-sized frame, rounded
			 * up to a whole number of SGE entries
			 */
			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		/* CDU validation values for the U and X aggregation
		 * contexts of this connection
		 */
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		/* Tx completions are reported via the C storm */
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		/* Tx BD ring base address */
		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}
5216
5217static void bnx2x_init_ind_table(struct bnx2x *bp)
5218{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005219 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005220 int i;
5221
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005222 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005223 return;
5224
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005225 DP(NETIF_MSG_IFUP,
5226 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005227 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005228 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005229 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005230 bp->fp->cl_id + (i % bp->num_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005231}
5232
/* Write the per-client Tstorm configuration (MTU, statistics counter and
 * VLAN-stripping flags) for every Rx client of this port.  The same
 * two-dword config struct is written for each client, with only the
 * statistics counter id varying per queue.
 */
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	/* NOTE: "STATSITICS" typo is in the FW HSI header - keep as is */
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	/* Enable HW VLAN stripping only when Rx is on, a vlan group is
	 * registered and HW stripping was requested
	 */
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		/* The config struct is two dwords wide */
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
5265
/* Translate the driver Rx mode (none/normal/allmulti/promisc) into the
 * Tstorm MAC filter configuration and the NIG LLH drive mask, and write
 * both to the chip.  Also (re)writes the per-client config unless Rx is
 * being turned off.
 */
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		/* unicast/multicast filtered by MAC/MC tables; accept bcast */
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	/* Port 0 and port 1 have separate LLH drive-mask registers */
	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
5328
Eilon Greenstein471de712008-08-13 15:49:35 -07005329static void bnx2x_init_internal_common(struct bnx2x *bp)
5330{
5331 int i;
5332
5333 /* Zero this manually as its initialization is
5334 currently missing in the initTool */
5335 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5336 REG_WR(bp, BAR_USTRORM_INTMEM +
5337 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5338}
5339
/* Per-port internal memory initialization: write the BNX2X_BTR value to
 * the HC_BTR locations of the C (both U and C indices), T and X storms
 * for this port.  (Exact BTR semantics are defined by the FW HSI.)
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
5351
/* Per-function internal memory initialization: program the Tstorm common
 * function config (RSS/TPA/E1HOV), reset all per-client statistics,
 * publish the statistics query address, set up CQE page mappings and
 * aggregation limits per queue, configure dropless flow control (E1H)
 * and initialize the rate shaping / fairness (cmng) state.
 */
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	/* RSS configuration (only meaningful with multiple queues) */
	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	/* In E1H multi-function mode the outer VLAN lives in the CAM */
	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	/* Zero the per-client statistics areas of the X/T/U storms */
	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	/* Address of the host fw_stats buffer the storms should fill */
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		/* Let all storms know whether we are in multi-function mode */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* CQE ring base address (low/high dwords) */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			/* SGE thresholds only matter when TPA is active */
			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}


			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}


	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
5566
Eilon Greenstein471de712008-08-13 15:49:35 -07005567static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5568{
5569 switch (load_code) {
5570 case FW_MSG_CODE_DRV_LOAD_COMMON:
5571 bnx2x_init_internal_common(bp);
5572 /* no break */
5573
5574 case FW_MSG_CODE_DRV_LOAD_PORT:
5575 bnx2x_init_internal_port(bp);
5576 /* no break */
5577
5578 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5579 bnx2x_init_internal_func(bp);
5580 break;
5581
5582 default:
5583 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5584 break;
5585 }
5586}
5587
/* Top-level NIC initialization: set up every fastpath queue and its
 * status block, then initialize all rings, contexts and internal memory,
 * and finally enable interrupts.  The ORDER of the calls below matters -
 * interrupts are enabled only after everything else is ready.
 */
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		/* sb 0 is reserved for the CNIC (iSCSI) client */
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
5642
5643/* end of nic init */
5644
5645/*
5646 * gzip service functions
5647 */
5648
5649static int bnx2x_gunzip_init(struct bnx2x *bp)
5650{
5651 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5652 &bp->gunzip_mapping);
5653 if (bp->gunzip_buf == NULL)
5654 goto gunzip_nomem1;
5655
5656 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5657 if (bp->strm == NULL)
5658 goto gunzip_nomem2;
5659
5660 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5661 GFP_KERNEL);
5662 if (bp->strm->workspace == NULL)
5663 goto gunzip_nomem3;
5664
5665 return 0;
5666
5667gunzip_nomem3:
5668 kfree(bp->strm);
5669 bp->strm = NULL;
5670
5671gunzip_nomem2:
5672 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5673 bp->gunzip_mapping);
5674 bp->gunzip_buf = NULL;
5675
5676gunzip_nomem1:
5677 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005678 " un-compression\n", bp->dev->name);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005679 return -ENOMEM;
5680}
5681
5682static void bnx2x_gunzip_end(struct bnx2x *bp)
5683{
5684 kfree(bp->strm->workspace);
5685
5686 kfree(bp->strm);
5687 bp->strm = NULL;
5688
5689 if (bp->gunzip_buf) {
5690 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5691 bp->gunzip_mapping);
5692 bp->gunzip_buf = NULL;
5693 }
5694}
5695
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005696static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005697{
5698 int n, rc;
5699
5700 /* check gzip header */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005701 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5702 BNX2X_ERR("Bad gzip header\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005703 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005704 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005705
5706 n = 10;
5707
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005708#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005709
5710 if (zbuf[3] & FNAME)
5711 while ((zbuf[n++] != 0) && (n < len));
5712
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005713 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005714 bp->strm->avail_in = len - n;
5715 bp->strm->next_out = bp->gunzip_buf;
5716 bp->strm->avail_out = FW_BUF_SIZE;
5717
5718 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5719 if (rc != Z_OK)
5720 return rc;
5721
5722 rc = zlib_inflate(bp->strm, Z_FINISH);
5723 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5724 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5725 bp->dev->name, bp->strm->msg);
5726
5727 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5728 if (bp->gunzip_outlen & 0x3)
5729 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5730 " gunzip_outlen (%d) not aligned\n",
5731 bp->dev->name, bp->gunzip_outlen);
5732 bp->gunzip_outlen >>= 2;
5733
5734 zlib_inflateEnd(bp->strm);
5735
5736 if (rc == Z_STREAM_END)
5737 return 0;
5738
5739 return rc;
5740}
5741
5742/* nic load/unload */
5743
5744/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005745 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005746 */
5747
5748/* send a NIG loopback debug packet */
5749static void bnx2x_lb_pckt(struct bnx2x *bp)
5750{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005751 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005752
5753 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005754 wb_write[0] = 0x55555555;
5755 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005756 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005757 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005758
5759 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005760 wb_write[0] = 0x09000000;
5761 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005762 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005763 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005764}
5765
5766/* some of the internal memories
5767 * are not directly readable from the driver
5768 * to test them we send debug packets
5769 */
/* Self test for the internal memories that are not directly readable
 * by the driver: loopback debug packets are pushed through the
 * BRB/PRS/NIG path and the packet counters are checked at each stage.
 * Called from common init on E1 devices on the first power-up only.
 *
 * Returns 0 on success or a negative stage-specific code (-1..-4) on
 * timeout/failure.  Note: leaves BRB/PRS/NIG re-initialized.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* emulation/FPGA platforms run much slower - stretch timeouts */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		/* wb_data is read back via the slowpath scratch area */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0;	/* OK */
}
5917
/* Unmask the per-block attention interrupts by clearing each block's
 * INT_MASK register (0 = nothing masked).  The commented-out SEM/MISC
 * writes are kept deliberately disabled; PBF and PXP2 keep specific
 * bits masked (see the trailing comments / FPGA special case).
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	/* NOTE(review): different PXP2 mask on FPGA - presumably some
	 * attention sources misbehave there; verify against HW docs */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
5956
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005957
/* Put the chip's shared (port-independent) blocks into reset by
 * writing the corresponding bit masks to the two RESET_REG CLEAR
 * registers; the matching SET writes happen later in common init.
 */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
5965
Eilon Greenstein573f2032009-08-12 08:24:14 +00005966static void bnx2x_init_pxp(struct bnx2x *bp)
5967{
5968 u16 devctl;
5969 int r_order, w_order;
5970
5971 pci_read_config_word(bp->pdev,
5972 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5973 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5974 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5975 if (bp->mrrs == -1)
5976 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5977 else {
5978 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5979 r_order = bp->mrrs;
5980 }
5981
5982 bnx2x_init_pxp_arb(bp, r_order, w_order);
5983}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00005984
5985static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5986{
5987 u32 val;
5988 u8 port;
5989 u8 is_required = 0;
5990
5991 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5992 SHARED_HW_CFG_FAN_FAILURE_MASK;
5993
5994 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5995 is_required = 1;
5996
5997 /*
5998 * The fan failure mechanism is usually related to the PHY type since
5999 * the power consumption of the board is affected by the PHY. Currently,
6000 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6001 */
6002 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6003 for (port = PORT_0; port < PORT_MAX; port++) {
6004 u32 phy_type =
6005 SHMEM_RD(bp, dev_info.port_hw_config[port].
6006 external_phy_config) &
6007 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6008 is_required |=
6009 ((phy_type ==
6010 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6011 (phy_type ==
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006012 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6013 (phy_type ==
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00006014 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6015 }
6016
6017 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6018
6019 if (is_required == 0)
6020 return;
6021
6022 /* Fan failure is indicated by SPIO 5 */
6023 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6024 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6025
6026 /* set to active low mode */
6027 val = REG_RD(bp, MISC_REG_SPIO_INT);
6028 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6029 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6030 REG_WR(bp, MISC_REG_SPIO_INT, val);
6031
6032 /* enable interrupt to signal the IGU */
6033 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6034 val |= (1 << MISC_REGISTERS_SPIO_5);
6035 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6036}
6037
/* One-time, port-independent chip initialization: resets the common
 * blocks, initializes them stage by stage (PXP, DMAE, the four storm
 * CM/SDM/SEM quadruples, QM, BRB/PRS, CDU/CFC, NIG, ...), runs the
 * internal memory self test on first power-up of E1, and finally
 * brings up the common PHY via the bootcode.
 *
 * The statement order mirrors the HW bring-up sequence and must not
 * be rearranged.  Returns 0 on success or -EBUSY if a block fails to
 * report init completion.
 */
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	/* assert reset on the common blocks, then release it */
	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	/* pulse the LCPLL control register */
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	/* byte-swap the request/read paths on big-endian hosts */
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do it's magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	/* initialize the QM pointer table and base addresses */
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	/* zero the storms' internal fast memories */
	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	/* these external PHYs require the HW lock for MDIO access */
	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006310
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006311static int bnx2x_init_port(struct bnx2x *bp)
6312{
6313 int port = BP_PORT(bp);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006314 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
Eilon Greenstein1c063282009-02-12 08:36:43 +00006315 u32 low, high;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006316 u32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006317
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006318 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6319
6320 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006321
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006322 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006323 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07006324
6325 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6326 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6327 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006328 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006329
Michael Chan37b091b2009-10-10 13:46:55 +00006330#ifdef BCM_CNIC
6331 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006332
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006333 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
Michael Chan37b091b2009-10-10 13:46:55 +00006334 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6335 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006336#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006337 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00006338
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006339 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00006340 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6341 /* no pause for emulation and FPGA */
6342 low = 0;
6343 high = 513;
6344 } else {
6345 if (IS_E1HMF(bp))
6346 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6347 else if (bp->dev->mtu > 4096) {
6348 if (bp->flags & ONE_PORT_FLAG)
6349 low = 160;
6350 else {
6351 val = bp->dev->mtu;
6352 /* (24*1024 + val*4)/256 */
6353 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6354 }
6355 } else
6356 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6357 high = low + 56; /* 14*1024/256 */
6358 }
6359 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6360 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6361
6362
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006363 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07006364
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006365 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006366 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006367 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006368 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006369
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006370 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6371 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6372 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6373 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006374
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006375 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006376 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006377
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006378 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006379
6380 /* configure PBF to work without PAUSE mtu 9000 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006381 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006382
6383 /* update threshold */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006384 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006385 /* update init credit */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006386 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006387
6388 /* probe changes */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006389 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006390 msleep(5);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006391 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006392
Michael Chan37b091b2009-10-10 13:46:55 +00006393#ifdef BCM_CNIC
6394 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006395#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006396 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006397 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006398
6399 if (CHIP_IS_E1(bp)) {
6400 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6401 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6402 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006403 bnx2x_init_block(bp, HC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006404
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006405 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006406 /* init aeu_mask_attn_func_0/1:
6407 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6408 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6409 * bits 4-7 are used for "per vn group attention" */
6410 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6411 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6412
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006413 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006414 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006415 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006416 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006417 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006418
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006419 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006420
6421 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6422
6423 if (CHIP_IS_E1H(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006424 /* 0x2 disable e1hov, 0x1 enable */
6425 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6426 (IS_E1HMF(bp) ? 0x1 : 0x2));
6427
Eilon Greenstein1c063282009-02-12 08:36:43 +00006428 {
6429 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6430 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6431 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6432 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006433 }
6434
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006435 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006436 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006437
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006438 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
Eilon Greenstein589abe32009-02-12 08:36:55 +00006439 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6440 {
6441 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6442
6443 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6444 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6445
6446 /* The GPIO should be swapped if the swap register is
6447 set and active */
6448 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6449 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6450
6451 /* Select function upon port-swap configuration */
6452 if (port == 0) {
6453 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6454 aeu_gpio_mask = (swap_val && swap_override) ?
6455 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6456 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6457 } else {
6458 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6459 aeu_gpio_mask = (swap_val && swap_override) ?
6460 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6461 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6462 }
6463 val = REG_RD(bp, offset);
6464 /* add GPIO3 to group */
6465 val |= aeu_gpio_mask;
6466 REG_WR(bp, offset, val);
6467 }
6468 break;
6469
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006470 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006471 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eliezer Tamirf1410642008-02-28 11:51:50 -08006472 /* add SPIO 5 to group 0 */
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006473 {
6474 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6475 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6476 val = REG_RD(bp, reg_addr);
Eliezer Tamirf1410642008-02-28 11:51:50 -08006477 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006478 REG_WR(bp, reg_addr, val);
6479 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08006480 break;
6481
6482 default:
6483 break;
6484 }
6485
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006486 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006487
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006488 return 0;
6489}
6490
/* ILT (Internal Lookup Table) geometry: 768 lines split between the two
 * PCI functions.  All function-like macros fully parenthesize their
 * arguments (CERT PRE01-C) so expressions may be passed safely.
 */
#define ILT_PER_FUNC	(768/2)
#define FUNC_ILT_BASE(func)	((func) * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)(x) >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | (x))
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | (f))

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006509
6510static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6511{
6512 int reg;
6513
6514 if (CHIP_IS_E1H(bp))
6515 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6516 else /* E1 */
6517 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6518
6519 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6520}
6521
/* bnx2x_init_func - per-PCI-function hardware initialization.
 *
 * Enables MSI reconfiguration in the HC, programs this function's ILT
 * window (context, and with CNIC also timers/QM/searcher tables), runs
 * the FUNC0_STAGE init blocks on E1H and clears latched PCIE error bits.
 * Register write order follows the hardware init sequence - do not reorder.
 *
 * Returns 0 (always succeeds).
 */
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	/* first ILT line owned by this function */
	i = FUNC_ILT_BASE(func);

	/* map the slowpath context into the CDU's ILT range */
	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	/* one ILT line each for the timers, QM and searcher (T1) tables,
	 * placed immediately after the CNIC context lines */
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	/* last free entry sits 64 bytes before the end of the 16K T2 table */
	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		/* per-function init stage for all relevant HW blocks */
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		/* enable this function in the LLH and set its outer VLAN id */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
6618
/* bnx2x_init_hw - top-level HW init dispatcher, driven by the MCP load code.
 *
 * The switch below uses *intentional* fallthrough: the first function to
 * load runs common + port + function init, the first on a port runs
 * port + function, and any later function runs function init only.
 * On success as well as on error the gunzip buffer is released via the
 * init_hw_err label (the success path simply falls into it).
 *
 * Returns 0 on success or the negative error from the failed init stage.
 */
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	/* DMAE is unusable until the port stage has run */
	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	/* seed the driver-pulse sequence from the MCP's shared memory */
	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	/* the CNIC status block uses the id just past the last queue;
	 * relies on i's final value from the loop above */
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
6680
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006681static void bnx2x_free_mem(struct bnx2x *bp)
6682{
6683
6684#define BNX2X_PCI_FREE(x, y, size) \
6685 do { \
6686 if (x) { \
6687 pci_free_consistent(bp->pdev, size, x, y); \
6688 x = NULL; \
6689 y = 0; \
6690 } \
6691 } while (0)
6692
6693#define BNX2X_FREE(x) \
6694 do { \
6695 if (x) { \
6696 vfree(x); \
6697 x = NULL; \
6698 } \
6699 } while (0)
6700
6701 int i;
6702
6703 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006704 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006705 for_each_queue(bp, i) {
6706
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006707 /* status blocks */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006708 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6709 bnx2x_fp(bp, i, status_blk_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006710 sizeof(struct host_status_block));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006711 }
6712 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006713 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006714
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006715 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006716 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6717 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6718 bnx2x_fp(bp, i, rx_desc_mapping),
6719 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6720
6721 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6722 bnx2x_fp(bp, i, rx_comp_mapping),
6723 sizeof(struct eth_fast_path_rx_cqe) *
6724 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006725
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006726 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07006727 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006728 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6729 bnx2x_fp(bp, i, rx_sge_mapping),
6730 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6731 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006732 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006733 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006734
6735 /* fastpath tx rings: tx_buf tx_desc */
6736 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6737 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6738 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006739 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006740 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006741 /* end of fastpath */
6742
6743 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006744 sizeof(struct host_def_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006745
6746 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006747 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006748
Michael Chan37b091b2009-10-10 13:46:55 +00006749#ifdef BCM_CNIC
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006750 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6751 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6752 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6753 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
Michael Chan37b091b2009-10-10 13:46:55 +00006754 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6755 sizeof(struct host_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006756#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006757 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006758
6759#undef BNX2X_PCI_FREE
6760#undef BNX2X_KFREE
6761}
6762
/* bnx2x_alloc_mem - allocate all per-device rings, status blocks and tables.
 *
 * Uses goto-based cleanup: both helper macros jump to alloc_mem_err on
 * failure, which releases everything already allocated via
 * bnx2x_free_mem() (safe on partial allocations) and returns -ENOMEM.
 * DMA-visible areas use pci_alloc_consistent(); host-only shadow rings
 * use vmalloc().  Everything is zero-initialized.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.
 */
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	  (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6868
6869static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6870{
6871 int i;
6872
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006873 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006874 struct bnx2x_fastpath *fp = &bp->fp[i];
6875
6876 u16 bd_cons = fp->tx_bd_cons;
6877 u16 sw_prod = fp->tx_pkt_prod;
6878 u16 sw_cons = fp->tx_pkt_cons;
6879
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006880 while (sw_cons != sw_prod) {
6881 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6882 sw_cons++;
6883 }
6884 }
6885}
6886
6887static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6888{
6889 int i, j;
6890
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006891 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006892 struct bnx2x_fastpath *fp = &bp->fp[j];
6893
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006894 for (i = 0; i < NUM_RX_BD; i++) {
6895 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6896 struct sk_buff *skb = rx_buf->skb;
6897
6898 if (skb == NULL)
6899 continue;
6900
6901 pci_unmap_single(bp->pdev,
6902 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein356e2382009-02-12 08:38:32 +00006903 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006904
6905 rx_buf->skb = NULL;
6906 dev_kfree_skb(skb);
6907 }
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006908 if (!fp->disable_tpa)
Eilon Greenstein32626232008-08-13 15:51:07 -07006909 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6910 ETH_MAX_AGGREGATION_QUEUES_E1 :
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006911 ETH_MAX_AGGREGATION_QUEUES_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006912 }
6913}
6914
/* Release all driver-owned skbs: Tx packets first, then Rx buffers. */
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
6920
6921static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6922{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006923 int i, offset = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006924
6925 free_irq(bp->msix_table[0].vector, bp->dev);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006926 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006927 bp->msix_table[0].vector);
6928
Michael Chan37b091b2009-10-10 13:46:55 +00006929#ifdef BCM_CNIC
6930 offset++;
6931#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006932 for_each_queue(bp, i) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006933 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006934 "state %x\n", i, bp->msix_table[i + offset].vector,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006935 bnx2x_fp(bp, i, state));
6936
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006937 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006938 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006939}
6940
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00006941static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006942{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006943 if (bp->flags & USING_MSIX_FLAG) {
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00006944 if (!disable_only)
6945 bnx2x_free_msix_irqs(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006946 pci_disable_msix(bp->pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006947 bp->flags &= ~USING_MSIX_FLAG;
6948
Eilon Greenstein8badd272009-02-12 08:36:15 +00006949 } else if (bp->flags & USING_MSI_FLAG) {
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00006950 if (!disable_only)
6951 free_irq(bp->pdev->irq, bp->dev);
Eilon Greenstein8badd272009-02-12 08:36:15 +00006952 pci_disable_msi(bp->pdev);
6953 bp->flags &= ~USING_MSI_FLAG;
6954
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00006955 } else if (!disable_only)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006956 free_irq(bp->pdev->irq, bp->dev);
6957}
6958
6959static int bnx2x_enable_msix(struct bnx2x *bp)
6960{
Eilon Greenstein8badd272009-02-12 08:36:15 +00006961 int i, rc, offset = 1;
6962 int igu_vec = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006963
Eilon Greenstein8badd272009-02-12 08:36:15 +00006964 bp->msix_table[0].entry = igu_vec;
6965 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006966
Michael Chan37b091b2009-10-10 13:46:55 +00006967#ifdef BCM_CNIC
6968 igu_vec = BP_L_ID(bp) + offset;
6969 bp->msix_table[1].entry = igu_vec;
6970 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6971 offset++;
6972#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006973 for_each_queue(bp, i) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00006974 igu_vec = BP_L_ID(bp) + offset + i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006975 bp->msix_table[i + offset].entry = igu_vec;
6976 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6977 "(fastpath #%u)\n", i + offset, igu_vec, i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006978 }
6979
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006980 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006981 BNX2X_NUM_QUEUES(bp) + offset);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006982 if (rc) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00006983 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6984 return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006985 }
Eilon Greenstein8badd272009-02-12 08:36:15 +00006986
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006987 bp->flags |= USING_MSIX_FLAG;
6988
6989 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006990}
6991
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006992static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6993{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006994 int i, rc, offset = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006995
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006996 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6997 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006998 if (rc) {
6999 BNX2X_ERR("request sp irq failed\n");
7000 return -EBUSY;
7001 }
7002
Michael Chan37b091b2009-10-10 13:46:55 +00007003#ifdef BCM_CNIC
7004 offset++;
7005#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007006 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007007 struct bnx2x_fastpath *fp = &bp->fp[i];
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007008 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7009 bp->dev->name, i);
Eilon Greensteinca003922009-08-12 22:53:28 -07007010
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007011 rc = request_irq(bp->msix_table[i + offset].vector,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007012 bnx2x_msix_fp_int, 0, fp->name, fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007013 if (rc) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007014 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007015 bnx2x_free_msix_irqs(bp);
7016 return -EBUSY;
7017 }
7018
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007019 fp->state = BNX2X_FP_STATE_IRQ;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007020 }
7021
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007022 i = BNX2X_NUM_QUEUES(bp);
Eilon Greensteinca003922009-08-12 22:53:28 -07007023 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7024 " ... fp[%d] %d\n",
7025 bp->dev->name, bp->msix_table[0].vector,
7026 0, bp->msix_table[offset].vector,
7027 i - 1, bp->msix_table[offset + i - 1].vector);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007028
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007029 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007030}
7031
Eilon Greenstein8badd272009-02-12 08:36:15 +00007032static int bnx2x_enable_msi(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007033{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007034 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007035
Eilon Greenstein8badd272009-02-12 08:36:15 +00007036 rc = pci_enable_msi(bp->pdev);
7037 if (rc) {
7038 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7039 return -1;
7040 }
7041 bp->flags |= USING_MSI_FLAG;
7042
7043 return 0;
7044}
7045
7046static int bnx2x_req_irq(struct bnx2x *bp)
7047{
7048 unsigned long flags;
7049 int rc;
7050
7051 if (bp->flags & USING_MSI_FLAG)
7052 flags = 0;
7053 else
7054 flags = IRQF_SHARED;
7055
7056 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007057 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007058 if (!rc)
7059 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7060
7061 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007062}
7063
Yitchak Gertner65abd742008-08-25 15:26:24 -07007064static void bnx2x_napi_enable(struct bnx2x *bp)
7065{
7066 int i;
7067
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007068 for_each_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007069 napi_enable(&bnx2x_fp(bp, i, napi));
7070}
7071
7072static void bnx2x_napi_disable(struct bnx2x *bp)
7073{
7074 int i;
7075
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007076 for_each_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007077 napi_disable(&bnx2x_fp(bp, i, napi));
7078}
7079
/* bnx2x_netif_start - resume traffic processing.
 *
 * bp->intr_sem counts outstanding "interrupts disabled" requests;
 * atomic_dec_and_test() is true only for the caller that drops it to
 * zero, so NAPI/interrupts/Tx are re-enabled exactly once, and only
 * while the netdev is running.  Counterpart of bnx2x_netif_stop().
 */
static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			/* wake Tx only once the device is fully up */
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}
7096
/* bnx2x_netif_stop - quiesce all traffic processing.
 *
 * Order matters: interrupts are masked and synchronized first
 * (optionally also disabled in HW when @disable_hw is set), then NAPI
 * polling is stopped, and finally the Tx queues are halted.
 * Counterpart of bnx2x_netif_start().
 */
static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}
7103
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007104/*
7105 * Init service functions
7106 */
7107
/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * Fills the slowpath mac_config command (primary MAC entry, plus an
 * optional broadcast entry) and posts a SET_MAC ramrod to the firmware.
 * The caller is responsible for waiting for the ramrod completion.
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	/* one entry for the MAC itself, one more if broadcast is added */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC: the CAM stores each 16-bit word byte-swapped */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		/* clearing: mark the CAM entry invalid instead */
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	/* post the SET_MAC ramrod with the DMA address of the command */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
7179
Michael Chane665bfd2009-10-10 13:46:54 +00007180/**
 7181 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 7182 *
 7183 * @param bp driver descriptor
 7184 * @param set set or clear an entry (1 or 0)
 7185 * @param mac pointer to a buffer containing a MAC
 7186 * @param cl_bit_vec bit vector of clients to register a MAC for
 7187 * @param cam_offset offset in a CAM to use
 7188 */
 7189static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
 7190 u32 cl_bit_vec, u8 cam_offset)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007191{
 7192 struct mac_configuration_cmd_e1h *config =
 7193 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
 7194
	/* E1H uses a single CAM entry per call (no broadcast entry here) */
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08007195 config->hdr.length = 1;
Michael Chane665bfd2009-10-10 13:46:54 +00007196 config->hdr.offset = cam_offset;
 7197 config->hdr.client_id = 0xff;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007198 config->hdr.reserved1 = 0;
 7199
 7200 /* primary MAC */
 7201 config->config_table[0].msb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00007202 swab16(*(u16 *)&mac[0]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007203 config->config_table[0].middle_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00007204 swab16(*(u16 *)&mac[2]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007205 config->config_table[0].lsb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00007206 swab16(*(u16 *)&mac[4]);
Eilon Greensteinca003922009-08-12 22:53:28 -07007207 config->config_table[0].clients_bit_vector =
Michael Chane665bfd2009-10-10 13:46:54 +00007208 cpu_to_le32(cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007209 config->config_table[0].vlan_id = 0;
 7210 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007211 if (set)
 7212 config->config_table[0].flags = BP_PORT(bp);
 7213 else
 7214 config->config_table[0].flags =
 7215 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007216
Michael Chane665bfd2009-10-10 13:46:54 +00007217 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007218 (set ? "setting" : "clearing"),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007219 config->config_table[0].msb_mac_addr,
 7220 config->config_table[0].middle_mac_addr,
Michael Chane665bfd2009-10-10 13:46:54 +00007221 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007222
	/* Asynchronous ramrod post - completion is awaited by the callers */
 7223 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
 7224 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
 7225 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
 7226}
7227
/*
 * bnx2x_wait_ramrod - wait for *state_p to reach @state (the state is
 * advanced by the slowpath event handler).  When @poll is non-zero the RX
 * rings are serviced in-line (used when interrupts/NAPI are not running);
 * @idx selects the non-default queue whose ring may carry the reply.
 *
 * Returns 0 on success, -EBUSY after a ~5 second timeout, or -EIO if the
 * driver paniced while waiting.
 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007228static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
 7229 int *state_p, int poll)
 7230{
 7231 /* can take a while if any port is running */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00007232 int cnt = 5000;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007233
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007234 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
 7235 poll ? "polling" : "waiting", state, idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007236
 7237 might_sleep();
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007238 while (cnt--) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007239 if (poll) {
 7240 bnx2x_rx_int(bp->fp, 10);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007241 /* if index is different from 0
 7242 * the reply for some commands will
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007243 * be on the non default queue
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007244 */
 7245 if (idx)
 7246 bnx2x_rx_int(&bp->fp[idx], 10);
 7247 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007248
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007249 mb(); /* state is changed by bnx2x_sp_event() */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00007250 if (*state_p == state) {
 7251#ifdef BNX2X_STOP_ON_ERROR
 7252 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
 7253#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007254 return 0;
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00007255 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007256
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007257 msleep(1);
Eilon Greensteine3553b22009-08-12 08:23:31 +00007258
 7259 if (bp->panic)
 7260 return -EIO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007261 }
 7262
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007263 /* timeout! */
Eliezer Tamir49d66772008-02-28 11:53:13 -08007264 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
 7265 poll ? "polling" : "waiting", state, idx);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007266#ifdef BNX2X_STOP_ON_ERROR
 7267 bnx2x_panic();
 7268#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007269
Eliezer Tamir49d66772008-02-28 11:53:13 -08007270 return -EBUSY;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007271}
7272
/*
 * Program (set=1) or clear (set=0) the primary Ethernet MAC in the E1H CAM
 * and wait for the SET_MAC ramrod to complete (set_mac_pending is
 * decremented by the slowpath event handler).
 */
Michael Chane665bfd2009-10-10 13:46:54 +00007273static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
 7274{
 7275 bp->set_mac_pending++;
 7276 smp_wmb();
 7277
 7278 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
 7279 (1 << bp->fp->cl_id), BP_FUNC(bp));
 7280
 7281 /* Wait for a completion */
 7282 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
 7283}
7284
/*
 * Program (set=1) or clear (set=0) the primary Ethernet MAC (plus the
 * broadcast entry) in the E1 CAM and wait for the ramrod to complete.
 * The CAM offset is 0 for port 0 and 32 for port 1 (see the CAM layout
 * comment in bnx2x_set_mac_addr_e1_gen()).
 */
 7285static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
 7286{
 7287 bp->set_mac_pending++;
 7288 smp_wmb();
 7289
 7290 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
 7291 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
 7292 1);
 7293
 7294 /* Wait for a completion */
 7295 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
 7296}
7297
Michael Chan993ac7b2009-10-10 13:46:56 +00007298#ifdef BCM_CNIC
 7299/**
 7300 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 7301 * MAC(s). This function will wait until the ramrod completion
 7302 * returns.
 7303 *
 7304 * @param bp driver handle
 7305 * @param set set or clear the CAM entry
 7306 *
 7307 * @return 0 if success, -ENODEV if ramrod doesn't return.
 7308 */
 7309static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
 7310{
 7311 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
 7312
 7313 bp->set_mac_pending++;
 7314 smp_wmb();
 7315
 7316 /* Send a SET_MAC ramrod */
 7317 if (CHIP_IS_E1(bp))
 7318 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
 7319 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
 7320 1);
 7321 else
 7322 /* CAM allocation for E1H
 7323 * unicasts: by func number
 7324 * multicast: 20+FUNC*20, 20 each
 7325 */
 7326 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
 7327 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
 7328
 7329 /* Wait for a completion when setting */
	/* NOTE(review): the wait result (possible -EBUSY timeout) is
	 * discarded and 0 is returned unconditionally - confirm whether the
	 * caller should be told about a ramrod timeout here.
	 */
 7330 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
 7331
 7332 return 0;
 7333}
 7334#endif
7335
/*
 * bnx2x_setup_leading - open the leading (default) queue: re-arm its IGU
 * status block, post the PORT_SETUP ramrod and wait for bp->state to
 * become BNX2X_STATE_OPEN.  Returns 0 or the bnx2x_wait_ramrod() error.
 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007336static int bnx2x_setup_leading(struct bnx2x *bp)
 7337{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007338 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007339
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007340 /* reset IGU state */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007341 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007342
 7343 /* SETUP ramrod */
 7344 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
 7345
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007346 /* Wait for completion */
 7347 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007348
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007349 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007350}
7351
/*
 * bnx2x_setup_multi - open a non-default queue @index: re-arm its IGU
 * status block, post the CLIENT_SETUP ramrod and wait for the fastpath
 * state to reach BNX2X_FP_STATE_OPEN.
 */
 7352static int bnx2x_setup_multi(struct bnx2x *bp, int index)
 7353{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007354 struct bnx2x_fastpath *fp = &bp->fp[index];
 7355
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007356 /* reset IGU state */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007357 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007358
Eliezer Tamir228241e2008-02-28 11:56:57 -08007359 /* SETUP ramrod */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007360 fp->state = BNX2X_FP_STATE_OPENING;
 7361 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
 7362 fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007363
 7364 /* Wait for completion */
 7365 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007366 &(fp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007367}
7368
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007369static int bnx2x_poll(struct napi_struct *napi, int budget);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007370
/*
 * bnx2x_set_num_queues_msix - pick bp->num_queues for MSI-X mode based on
 * the configured RSS mode: 1 queue when RSS is disabled (or unknown mode),
 * otherwise the module-parameter count (or the online-CPU count) capped at
 * BNX2X_MAX_QUEUES(bp).
 */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007371static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007372{
Eilon Greensteinca003922009-08-12 22:53:28 -07007373
 7374 switch (bp->multi_mode) {
 7375 case ETH_RSS_MODE_DISABLED:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007376 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007377 break;
 7378
 7379 case ETH_RSS_MODE_REGULAR:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007380 if (num_queues)
 7381 bp->num_queues = min_t(u32, num_queues,
 7382 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07007383 else
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007384 bp->num_queues = min_t(u32, num_online_cpus(),
 7385 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07007386 break;
 7387
 7388
 7389 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007390 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007391 break;
 7392 }
Eilon Greensteinca003922009-08-12 22:53:28 -07007393}
7394
/*
 * bnx2x_set_num_queues - decide the queue count per the interrupt mode and
 * try to enable MSI-X when applicable; falls back to a single queue if
 * MSI-X cannot be enabled.  Also publishes the result to the net_device
 * TX queue count.  Returns the bnx2x_enable_msix() result (0 in the
 * INTx/MSI cases).
 */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007395static int bnx2x_set_num_queues(struct bnx2x *bp)
Eilon Greensteinca003922009-08-12 22:53:28 -07007396{
 7397 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007398
Eilon Greenstein8badd272009-02-12 08:36:15 +00007399 switch (int_mode) {
 7400 case INT_MODE_INTx:
 7401 case INT_MODE_MSI:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007402 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007403 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
Eilon Greenstein8badd272009-02-12 08:36:15 +00007404 break;
 7405
 7406 case INT_MODE_MSIX:
 7407 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007408 /* Set number of queues according to bp->multi_mode value */
 7409 bnx2x_set_num_queues_msix(bp);
Eilon Greensteinca003922009-08-12 22:53:28 -07007410
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007411 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
 7412 bp->num_queues);
Eilon Greensteinca003922009-08-12 22:53:28 -07007413
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007414 /* if we can't use MSI-X we only need one fp,
 7415 * so try to enable MSI-X with the requested number of fp's
 7416 * and fallback to MSI or legacy INTx with one fp
 7417 */
Eilon Greensteinca003922009-08-12 22:53:28 -07007418 rc = bnx2x_enable_msix(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007419 if (rc)
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007420 /* failed to enable MSI-X */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007421 bp->num_queues = 1;
Eilon Greenstein8badd272009-02-12 08:36:15 +00007422 break;
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007423 }
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007424 bp->dev->real_num_tx_queues = bp->num_queues;
Eilon Greensteinca003922009-08-12 22:53:28 -07007425 return rc;
Eilon Greenstein8badd272009-02-12 08:36:15 +00007426}
7427
Michael Chan993ac7b2009-10-10 13:46:56 +00007428#ifdef BCM_CNIC
7429static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7430static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7431#endif
Eilon Greenstein8badd272009-02-12 08:36:15 +00007432
 7433/* must be called with rtnl_lock */
/*
 * bnx2x_nic_load - bring the NIC up.
 *
 * Sequence: pick the queue count / interrupt mode, allocate memory,
 * request IRQs (MSI-X, MSI or INTx), handshake LOAD_REQ/LOAD_DONE with the
 * MCP firmware (or emulate the load counting when there is no MCP), init
 * the HW and driver internals, open the leading and non-default queues,
 * program the MAC(s), then start the fast path per @load_mode
 * (LOAD_NORMAL / LOAD_OPEN / LOAD_DIAG).
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is unwound through the load_error4..load_error1 labels
 * (each label releases what was acquired after the previous one).
 */
 7434static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 7435{
 7436 u32 load_code;
Eilon Greensteinca003922009-08-12 22:53:28 -07007437 int i, rc;
 7438
Eilon Greenstein8badd272009-02-12 08:36:15 +00007439#ifdef BNX2X_STOP_ON_ERROR
Eilon Greenstein8badd272009-02-12 08:36:15 +00007440 if (unlikely(bp->panic))
 7441 return -EPERM;
 7442#endif
 7443
 7444 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
 7445
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007446 rc = bnx2x_set_num_queues(bp);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007447
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007448 if (bnx2x_alloc_mem(bp)) {
 7449 bnx2x_free_irq(bp, true);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007450 return -ENOMEM;
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007451 }
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007452
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007453 for_each_queue(bp, i)
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007454 bnx2x_fp(bp, i, disable_tpa) =
 7455 ((bp->flags & TPA_ENABLE_FLAG) == 0);
 7456
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007457 for_each_queue(bp, i)
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007458 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 7459 bnx2x_poll, 128);
 7460
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007461 bnx2x_napi_enable(bp);
 7462
 7463 if (bp->flags & USING_MSIX_FLAG) {
 7464 rc = bnx2x_req_msix_irqs(bp);
 7465 if (rc) {
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007466 bnx2x_free_irq(bp, true);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007467 goto load_error1;
 7468 }
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007469 } else {
Eilon Greensteinca003922009-08-12 22:53:28 -07007470 /* Fall to INTx if failed to enable MSI-X due to lack of
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007471 memory (in bnx2x_set_num_queues()) */
Eilon Greenstein8badd272009-02-12 08:36:15 +00007472 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
 7473 bnx2x_enable_msi(bp);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007474 bnx2x_ack_int(bp);
 7475 rc = bnx2x_req_irq(bp);
 7476 if (rc) {
 7477 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007478 bnx2x_free_irq(bp, true);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007479 goto load_error1;
 7480 }
Eilon Greenstein8badd272009-02-12 08:36:15 +00007481 if (bp->flags & USING_MSI_FLAG) {
 7482 bp->dev->irq = bp->pdev->irq;
 7483 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
 7484 bp->dev->name, bp->pdev->irq);
 7485 }
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007486 }
 7487
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007488 /* Send LOAD_REQUEST command to MCP
 7489 Returns the type of LOAD command:
 7490 if it is the first port to be initialized
 7491 common blocks should be initialized, otherwise - not
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007492 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007493 if (!BP_NOMCP(bp)) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007494 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 7495 if (!load_code) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007496 BNX2X_ERR("MCP response failure, aborting\n");
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007497 rc = -EBUSY;
 7498 goto load_error2;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007499 }
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007500 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
 7501 rc = -EBUSY; /* other port in diagnostic mode */
 7502 goto load_error2;
 7503 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007504
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007505 } else {
		/* No MCP: emulate its arbitration with local load counters */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007506 int port = BP_PORT(bp);
 7507
Eilon Greensteinf5372252009-02-12 08:38:30 +00007508 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007509 load_count[0], load_count[1], load_count[2]);
 7510 load_count[0]++;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007511 load_count[1 + port]++;
Eilon Greensteinf5372252009-02-12 08:38:30 +00007512 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007513 load_count[0], load_count[1], load_count[2]);
 7514 if (load_count[0] == 1)
 7515 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007516 else if (load_count[1 + port] == 1)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007517 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
 7518 else
 7519 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007520 }
 7521
	/* first function on a port is the port management function (PMF) */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007522 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
 7523 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
 7524 bp->port.pmf = 1;
 7525 else
 7526 bp->port.pmf = 0;
 7527 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
 7528
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007529 /* Initialize HW */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007530 rc = bnx2x_init_hw(bp, load_code);
 7531 if (rc) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007532 BNX2X_ERR("HW init failed, aborting\n");
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007533 goto load_error2;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007534 }
 7535
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007536 /* Setup NIC internals and enable interrupts */
Eilon Greenstein471de712008-08-13 15:49:35 -07007537 bnx2x_nic_init(bp, load_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007538
	/* Advertise DCC support to the MCP (first function only) */
Eilon Greenstein2691d512009-08-12 08:22:08 +00007539 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
 7540 (bp->common.shmem2_base))
 7541 SHMEM2_WR(bp, dcc_support,
 7542 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
 7543 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
 7544
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007545 /* Send LOAD_DONE command to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007546 if (!BP_NOMCP(bp)) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007547 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
 7548 if (!load_code) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007549 BNX2X_ERR("MCP response failure, aborting\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007550 rc = -EBUSY;
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007551 goto load_error3;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007552 }
 7553 }
 7554
 7555 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
 7556
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007557 rc = bnx2x_setup_leading(bp);
 7558 if (rc) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007559 BNX2X_ERR("Setup leading failed!\n");
Eilon Greensteine3553b22009-08-12 08:23:31 +00007560#ifndef BNX2X_STOP_ON_ERROR
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007561 goto load_error3;
Eilon Greensteine3553b22009-08-12 08:23:31 +00007562#else
 7563 bp->panic = 1;
 7564 return -EBUSY;
 7565#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007566 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007567
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007568 if (CHIP_IS_E1H(bp))
 7569 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
Eilon Greensteinf5372252009-02-12 08:38:30 +00007570 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07007571 bp->flags |= MF_FUNC_DIS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007572 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007573
Eilon Greensteinca003922009-08-12 22:53:28 -07007574 if (bp->state == BNX2X_STATE_OPEN) {
Michael Chan37b091b2009-10-10 13:46:55 +00007575#ifdef BCM_CNIC
 7576 /* Enable Timer scan */
 7577 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
 7578#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007579 for_each_nondefault_queue(bp, i) {
 7580 rc = bnx2x_setup_multi(bp, i);
 7581 if (rc)
Michael Chan37b091b2009-10-10 13:46:55 +00007582#ifdef BCM_CNIC
 7583 goto load_error4;
 7584#else
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007585 goto load_error3;
Michael Chan37b091b2009-10-10 13:46:55 +00007586#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007587 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007588
Eilon Greensteinca003922009-08-12 22:53:28 -07007589 if (CHIP_IS_E1(bp))
Michael Chane665bfd2009-10-10 13:46:54 +00007590 bnx2x_set_eth_mac_addr_e1(bp, 1);
Eilon Greensteinca003922009-08-12 22:53:28 -07007591 else
Michael Chane665bfd2009-10-10 13:46:54 +00007592 bnx2x_set_eth_mac_addr_e1h(bp, 1);
Michael Chan993ac7b2009-10-10 13:46:56 +00007593#ifdef BCM_CNIC
 7594 /* Set iSCSI L2 MAC */
 7595 mutex_lock(&bp->cnic_mutex);
 7596 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
 7597 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
 7598 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
Michael Chan4a6e47a2009-12-25 17:13:07 -08007599 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
 7600 CNIC_SB_ID(bp));
Michael Chan993ac7b2009-10-10 13:46:56 +00007601 }
 7602 mutex_unlock(&bp->cnic_mutex);
 7603#endif
Eilon Greensteinca003922009-08-12 22:53:28 -07007604 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007605
 7606 if (bp->port.pmf)
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00007607 bnx2x_initial_phy_init(bp, load_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007608
 7609 /* Start fast path */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007610 switch (load_mode) {
 7611 case LOAD_NORMAL:
Eilon Greensteinca003922009-08-12 22:53:28 -07007612 if (bp->state == BNX2X_STATE_OPEN) {
 7613 /* Tx queue should be only reenabled */
 7614 netif_tx_wake_all_queues(bp->dev);
 7615 }
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007616 /* Initialize the receive filter. */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007617 bnx2x_set_rx_mode(bp->dev);
 7618 break;
 7619
 7620 case LOAD_OPEN:
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007621 netif_tx_start_all_queues(bp->dev);
Eilon Greensteinca003922009-08-12 22:53:28 -07007622 if (bp->state != BNX2X_STATE_OPEN)
 7623 netif_tx_disable(bp->dev);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007624 /* Initialize the receive filter. */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007625 bnx2x_set_rx_mode(bp->dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007626 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007627
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007628 case LOAD_DIAG:
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007629 /* Initialize the receive filter. */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007630 bnx2x_set_rx_mode(bp->dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007631 bp->state = BNX2X_STATE_DIAG;
 7632 break;
 7633
 7634 default:
 7635 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007636 }
 7637
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007638 if (!bp->port.pmf)
 7639 bnx2x__link_status_update(bp);
 7640
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007641 /* start the timer */
 7642 mod_timer(&bp->timer, jiffies + bp->current_interval);
 7643
Michael Chan993ac7b2009-10-10 13:46:56 +00007644#ifdef BCM_CNIC
 7645 bnx2x_setup_cnic_irq_info(bp);
 7646 if (bp->state == BNX2X_STATE_OPEN)
 7647 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
 7648#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007649
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007650 return 0;
 7651
Michael Chan37b091b2009-10-10 13:46:55 +00007652#ifdef BCM_CNIC
 7653load_error4:
 7654 /* Disable Timer scan */
 7655 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
 7656#endif
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007657load_error3:
 7658 bnx2x_int_disable_sync(bp, 1);
 7659 if (!BP_NOMCP(bp)) {
 7660 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
 7661 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
 7662 }
 7663 bp->port.pmf = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07007664 /* Free SKBs, SGEs, TPA pool and driver internals */
 7665 bnx2x_free_skbs(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007666 for_each_queue(bp, i)
Eilon Greenstein3196a882008-08-13 15:58:49 -07007667 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007668load_error2:
Yitchak Gertnerd1014632008-08-25 15:25:45 -07007669 /* Release IRQs */
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007670 bnx2x_free_irq(bp, false);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007671load_error1:
 7672 bnx2x_napi_disable(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007673 for_each_queue(bp, i)
Eilon Greenstein7cde1c82009-01-22 06:01:25 +00007674 netif_napi_del(&bnx2x_fp(bp, i, napi));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007675 bnx2x_free_mem(bp);
 7676
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007677 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007678}
7679
/*
 * bnx2x_stop_multi - close a non-default queue @index: HALT ramrod, wait
 * for HALTED, then CFC_DEL ramrod and wait for CLOSED.  Returns 0 or the
 * bnx2x_wait_ramrod() error (bails out early on the HALT timeout).
 */
 7680static int bnx2x_stop_multi(struct bnx2x *bp, int index)
 7681{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007682 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007683 int rc;
 7684
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007685 /* halt the connection */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007686 fp->state = BNX2X_FP_STATE_HALTING;
 7687 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007688
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007689 /* Wait for completion */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007690 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007691 &(fp->state), 1);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007692 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007693 return rc;
 7694
 7695 /* delete cfc entry */
 7696 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
 7697
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007698 /* Wait for completion */
 7699 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007700 &(fp->state), 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007701 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007702}
7703
/*
 * bnx2x_stop_leading - close the leading queue: HALT ramrod (waited via
 * the fastpath state), then PORT_DELETE ramrod whose completion is polled
 * on the default status block producer for up to ~500 ms.  A PORT_DELETE
 * timeout yields -EBUSY but is tolerable since the chip is about to be
 * reset anyway (see the comment in the body).
 */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007704static int bnx2x_stop_leading(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007705{
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00007706 __le16 dsb_sp_prod_idx;
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007707 /* if the other port is handling traffic,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007708 this can take a lot of time */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007709 int cnt = 500;
 7710 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007711
 7712 might_sleep();
 7713
 7714 /* Send HALT ramrod */
 7715 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
Eilon Greenstein0626b892009-02-12 08:38:14 +00007716 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007717
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007718 /* Wait for completion */
 7719 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
 7720 &(bp->fp[0].state), 1);
 7721 if (rc) /* timeout */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007722 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007723
	/* snapshot the producer so we can detect the PORT_DEL completion */
Eliezer Tamir49d66772008-02-28 11:53:13 -08007724 dsb_sp_prod_idx = *bp->dsb_sp_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007725
Eliezer Tamir228241e2008-02-28 11:56:57 -08007726 /* Send PORT_DELETE ramrod */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007727 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
 7728
Eliezer Tamir49d66772008-02-28 11:53:13 -08007729 /* Wait for completion to arrive on default status block
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007730 we are going to reset the chip anyway
 7731 so there is not much to do if this times out
 7732 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007733 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007734 if (!cnt) {
 7735 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
 7736 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
 7737 *bp->dsb_sp_prod, dsb_sp_prod_idx);
 7738#ifdef BNX2X_STOP_ON_ERROR
 7739 bnx2x_panic();
 7740#endif
Eilon Greenstein36e552a2009-02-12 08:37:21 +00007741 rc = -EBUSY;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007742 break;
 7743 }
 7744 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007745 msleep(1);
Eilon Greenstein5650d9d2009-01-22 06:01:29 +00007746 rmb(); /* Refresh the dsb_sp_prod */
Eliezer Tamir49d66772008-02-28 11:53:13 -08007747 }
 7748 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
 7749 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007750
 7751 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007752}
7753
/*
 * bnx2x_reset_func - reset the per-function HW state: mask the IGU edge
 * registers, (with CNIC) stop the timer scan and wait up to 2 s for it to
 * drain, then clear this function's ILT entries.
 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007754static void bnx2x_reset_func(struct bnx2x *bp)
 7755{
 7756 int port = BP_PORT(bp);
 7757 int func = BP_FUNC(bp);
 7758 int base, i;
Eliezer Tamir49d66772008-02-28 11:53:13 -08007759
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007760 /* Configure IGU */
 7761 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
 7762 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
 7763
Michael Chan37b091b2009-10-10 13:46:55 +00007764#ifdef BCM_CNIC
 7765 /* Disable Timer scan */
 7766 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
 7767 /*
 7768 * Wait for at least 10ms and up to 2 second for the timers scan to
 7769 * complete
 7770 */
 7771 for (i = 0; i < 200; i++) {
 7772 msleep(10);
 7773 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
 7774 break;
 7775 }
 7776#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007777 /* Clear ILT */
 7778 base = FUNC_ILT_BASE(func);
 7779 for (i = base; i < base + ILT_PER_FUNC; i++)
 7780 bnx2x_ilt_wr(bp, i, 0);
 7781}
7782
/*
 * bnx2x_reset_port - quiesce the per-port HW: mask the NIG interrupt, stop
 * RX traffic into the BRB, mask the AEU attentions, then (after 100 ms)
 * warn if BRB blocks are still occupied.
 */
 7783static void bnx2x_reset_port(struct bnx2x *bp)
 7784{
 7785 int port = BP_PORT(bp);
 7786 u32 val;
 7787
 7788 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
 7789
 7790 /* Do not rcv packets to BRB */
 7791 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
 7792 /* Do not direct rcv packets that are not for MCP to the BRB */
 7793 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
 7794 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
 7795
 7796 /* Configure AEU */
 7797 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
 7798
 7799 msleep(100);
 7800 /* Check for BRB port occupancy */
 7801 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
 7802 if (val)
 7803 DP(NETIF_MSG_IFDOWN,
Eilon Greenstein33471622008-08-13 15:59:08 -07007804 "BRB1 is not empty %d blocks are occupied\n", val);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007805
 7806 /* TODO: Close Doorbell port? */
 7807}
7808
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007809static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7810{
7811 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7812 BP_FUNC(bp), reset_code);
7813
7814 switch (reset_code) {
7815 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7816 bnx2x_reset_port(bp);
7817 bnx2x_reset_func(bp);
7818 bnx2x_reset_common(bp);
7819 break;
7820
7821 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7822 bnx2x_reset_port(bp);
7823 bnx2x_reset_func(bp);
7824 break;
7825
7826 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7827 bnx2x_reset_func(bp);
7828 break;
7829
7830 default:
7831 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7832 break;
7833 }
7834}
7835
/* must be called with rtnl_lock */
/*
 * Bring the NIC down: stop the data path (rx mode, NAPI, IRQs, tx
 * queues), clear the MAC/multicast configuration, negotiate a reset
 * depth with the MCP (honoring WoL settings), halt all connections,
 * reset the chip and free all driver resources.
 *
 * @unload_mode: UNLOAD_NORMAL disables WoL; otherwise WoL handling
 *               depends on NO_WOL_FLAG and bp->wol.
 * Returns 0.  NOTE(review): shutdown errors jump to unload_error and
 * are not propagated — the function still returns 0 (except the
 * BNX2X_STOP_ON_ERROR -EBUSY paths); confirm this is intentional.
 */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

#ifdef BCM_CNIC
	/* tell the CNIC (iSCSI offload) driver to stop first */
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	/* keep the MCP pulse alive while unloading */
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* poll the tx ring up to ~1000 x 1ms for completions */
		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		/* E1: clear the unicast MAC and invalidate the whole
		 * multicast CAM via a SET_MAC ramrod */
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		/* make the pending counter visible before posting */
		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		/* disable this function in the NIG and clear the
		 * multicast hash table */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	/* pick the unload request code to send to the MCP based on
	 * the unload mode and Wake-on-LAN configuration */
	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		/* program the MAC into the EMAC match registers so the
		 * HW can recognize the wakeup pattern */
		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	/* With an MCP, it decides the actual reset depth; without one,
	 * emulate the decision using the shared load counters */
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	/* link is only reset when the whole port goes down */
	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
8019
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008020static void bnx2x_reset_task(struct work_struct *work)
8021{
8022 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8023
8024#ifdef BNX2X_STOP_ON_ERROR
8025 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8026 " so reset not done to allow debug dump,\n"
Joe Perchesad361c92009-07-06 13:05:40 -07008027 " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008028 return;
8029#endif
8030
8031 rtnl_lock();
8032
8033 if (!netif_running(bp->dev))
8034 goto reset_task_exit;
8035
8036 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8037 bnx2x_nic_load(bp, LOAD_NORMAL);
8038
8039reset_task_exit:
8040 rtnl_unlock();
8041}
8042
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008043/* end of nic load/unload */
8044
8045/* ethtool_ops */
8046
8047/*
8048 * Init service functions
8049 */
8050
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00008051static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8052{
8053 switch (func) {
8054 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8055 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8056 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8057 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8058 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8059 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8060 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8061 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8062 default:
8063 BNX2X_ERR("Unsupported function index: %d\n", func);
8064 return (u32)(-1);
8065 }
8066}
8067
/* Disable interrupts on an E1H chip by temporarily "pretending" to be
 * function 0 through the PGL pretend register, so the disable behaves
 * as it would on an E1 device.  Each pretend write is verified by a
 * read-back; a mismatch is fatal (BUG).
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
8100
/* Disable chip interrupts during UNDI takeover.  E1H needs the
 * function-pretend dance; other chips take the direct path.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp))
		bnx2x_int_disable(bp);
	else
		bnx2x_undi_int_disable_e1h(bp, func);
}
8108
/* Detect a leftover pre-boot (UNDI) driver still owning the device and
 * shut it down before this driver loads: request unload from the MCP
 * for both ports, block ingress traffic, hard-reset the chip (keeping
 * the NIG port-swap straps), and restore our function/fw sequence.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			/* mask interrupts before touching the datapath */
			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
				NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
				NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
8207
/* Read the chip-wide (port/function independent) hardware info:
 * chip id/revision, single- vs dual-port strapping, flash size,
 * shared-memory bases, LED mode, feature flags, bootcode version and
 * WoL capability.  Sets NO_MCP_FLAG and returns early if the shared
 * memory base looks invalid (no MCP running).
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* detect single-port devices from chip id bit 0 and the
	 * bond-option register at 0x2874 (per chip family) */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	/* a shmem base outside [0xA0000, 0xC0000) means no MCP */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	/* bootcode (BC) version lives in the upper bytes of bc_rev */
	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	/* WoL is only possible on E1HVN 0; on others force it off */
	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	/* part number is stored as four consecutive 32-bit words */
	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
8306
/* Build bp->port.supported (ethtool SUPPORTED_* mask) from the switch
 * configuration (1G SerDes vs 10G XGXS) and the external PHY type read
 * from NVRAM, read the PHY address from the NIG, then mask the result
 * down to what the NVRAM speed capability mask allows.  On a bad NVRAM
 * configuration the function logs and returns without setting phy_addr.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* SerDes PHY address: one register per port, 0x10 apart */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		/* XGXS PHY address: one register per port, 0x18 apart */
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
8543
/* Translate the NVRAM-requested link speed/duplex (bp->port.link_config)
 * into driver link parameters (bp->link_params.req_*) and the ethtool
 * advertising mask (bp->port.advertising).
 *
 * Each requested speed is validated against bp->port.supported (built
 * earlier by bnx2x_link_settings_supported()); on an unsupported or
 * malformed NVRAM setting the function logs an error and returns early,
 * leaving the previously set defaults in place.
 */
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	/* default to full duplex; half-duplex cases override below */
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			/* BCM8705/8706 are 10G-only PHYs without autoneg */
			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	/* all three 10G media variants map to the same forced speed */
	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		/* unknown NVRAM speed setting: fall back to autoneg */
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	/* AUTO flow control requires autoneg; without it, disable FC */
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
Michael Chane665bfd2009-10-10 13:46:54 +00008707static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8708{
8709 mac_hi = cpu_to_be16(mac_hi);
8710 mac_lo = cpu_to_be32(mac_lo);
8711 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8712 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8713}
8714
/* Read per-port hardware configuration from shared memory (SHMEM) into
 * bp->link_params and bp->port: lane config, external PHY config, speed
 * capability mask, requested link config, per-lane XGXS equalization
 * values, WoL default, MDIO PHY address and the port MAC address.
 * Also derives supported/requested link settings via the two
 * bnx2x_link_settings_*() helpers.
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		/* treat the NOC variant as a plain BCM8727 and remember
		 * the distinction in a feature flag */
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	/* each SHMEM word packs two 16-bit lane values (high/low halves) */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	/* port MAC address: upper 16 bits + lower 32 bits from SHMEM */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	/* separate MAC used for iSCSI offload (CNIC) */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008807static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8808{
8809 int func = BP_FUNC(bp);
8810 u32 val, val2;
8811 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008812
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008813 bnx2x_get_common_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008814
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008815 bp->e1hov = 0;
8816 bp->e1hmf = 0;
8817 if (CHIP_IS_E1H(bp)) {
8818 bp->mf_config =
8819 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008820
Eilon Greenstein2691d512009-08-12 08:22:08 +00008821 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
Eilon Greenstein3196a882008-08-13 15:58:49 -07008822 FUNC_MF_CFG_E1HOV_TAG_MASK);
Eilon Greenstein2691d512009-08-12 08:22:08 +00008823 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008824 bp->e1hmf = 1;
Eilon Greenstein2691d512009-08-12 08:22:08 +00008825 BNX2X_DEV_INFO("%s function mode\n",
8826 IS_E1HMF(bp) ? "multi" : "single");
8827
8828 if (IS_E1HMF(bp)) {
8829 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8830 e1hov_tag) &
8831 FUNC_MF_CFG_E1HOV_TAG_MASK);
8832 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8833 bp->e1hov = val;
8834 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8835 "(0x%04x)\n",
8836 func, bp->e1hov, bp->e1hov);
8837 } else {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008838 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8839 " aborting\n", func);
8840 rc = -EPERM;
8841 }
Eilon Greenstein2691d512009-08-12 08:22:08 +00008842 } else {
8843 if (BP_E1HVN(bp)) {
8844 BNX2X_ERR("!!! VN %d in single function mode,"
8845 " aborting\n", BP_E1HVN(bp));
8846 rc = -EPERM;
8847 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008848 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008849 }
8850
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008851 if (!BP_NOMCP(bp)) {
8852 bnx2x_get_port_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008853
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008854 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8855 DRV_MSG_SEQ_NUMBER_MASK);
8856 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8857 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008858
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008859 if (IS_E1HMF(bp)) {
8860 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8861 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8862 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8863 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8864 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8865 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8866 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8867 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8868 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8869 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8870 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8871 ETH_ALEN);
8872 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8873 ETH_ALEN);
8874 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008875
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008876 return rc;
8877 }
8878
8879 if (BP_NOMCP(bp)) {
8880 /* only supposed to happen on emulation/FPGA */
Eilon Greenstein33471622008-08-13 15:59:08 -07008881 BNX2X_ERR("warning random MAC workaround active\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008882 random_ether_addr(bp->dev->dev_addr);
8883 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8884 }
8885
8886 return rc;
8887}
8888
/* One-time driver-state initialization at probe: locks, work items,
 * hardware info discovery, module-parameter-driven feature setup (multi
 * queue, TPA/LRO, dropless FC, MRRS), ring sizes, interrupt coalescing
 * ticks and the periodic timer.
 *
 * Returns the status of bnx2x_get_hwinfo() (0 or a negative errno).
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	/* RSS multi-queue only works with MSI-X; otherwise force it off */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		       "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	/* dropless flow control is not available on E1 chips */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	/* slow (emulation/FPGA) revisions get a longer timer period */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
8966
8967/*
8968 * ethtool service functions
8969 */
8970
8971/* All ethtool functions called with rtnl_lock */
8972
/* ethtool get_settings handler: report current link speed/duplex,
 * supported/advertised masks, port type (derived from the external PHY
 * type when on a 10G switch config), PHY address and autoneg state.
 * Called under rtnl_lock.  Always returns 0.
 */
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if ((bp->state == BNX2X_STATE_OPEN) &&
	    !(bp->flags & MF_FUNC_DIS) &&
	    (bp->link_vars.link_up)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
		if (IS_E1HMF(bp)) {
			/* in multi-function mode the reported speed is
			 * capped by this VN's max bandwidth allocation */
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < cmd->speed)
				cmd->speed = vn_max_rate;
		}
	} else {
		/* link down (or function disabled): speed/duplex unknown */
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
9053
/* ethtool set_settings handler: apply a requested autoneg or forced
 * speed/duplex configuration after validating it against
 * bp->port.supported, then restart the link if the interface is up.
 * Called under rtnl_lock.
 *
 * Returns 0 on success; -EINVAL for an unsupported speed/duplex
 * combination.  A no-op (returning 0) in multi-function mode, where
 * link settings are owned by the port, not the function.
 */
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		/* 1G and above support full duplex only */
		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	/* if the interface is up, apply the new settings immediately */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
9204
#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

/* ethtool_ops::get_regs_len - size in bytes of the register dump that
 * bnx2x_get_regs() produces for this chip (E1 or E1H), including the
 * leading struct dump_hdr.
 */
static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		/* plain registers marked online for E1 */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		/* wide-bus registers: each entry covers size slots, plus
		 * read_regs_count extra reads per slot */
		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;	/* the counts above are in 32-bit words */
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
9239
/* ethtool_ops::get_regs - fill @_p with a register dump.
 *
 * Layout: a struct dump_hdr followed by the raw 32-bit values of every
 * register marked online for the running chip (E1 or E1H).  The buffer
 * was sized by bnx2x_get_regs_len() and is zeroed up front, so nothing
 * stale leaks; no registers are touched while the interface is down.
 *
 * NOTE(review): bnx2x_get_regs_len() also counts the wreg_addrs_* wide
 * register tables, which are never written here - that tail of the
 * buffer stays zero.  Confirm whether wide-register dumping was meant
 * to be added.
 */
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	/* hdr_size is expressed in dwords, excluding this field itself */
	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
9279
Eilon Greenstein0d28e492009-08-12 08:23:40 +00009280#define PHY_FW_VER_LEN 10
9281
9282static void bnx2x_get_drvinfo(struct net_device *dev,
9283 struct ethtool_drvinfo *info)
9284{
9285 struct bnx2x *bp = netdev_priv(dev);
9286 u8 phy_fw_ver[PHY_FW_VER_LEN];
9287
9288 strcpy(info->driver, DRV_MODULE_NAME);
9289 strcpy(info->version, DRV_MODULE_VERSION);
9290
9291 phy_fw_ver[0] = '\0';
9292 if (bp->port.pmf) {
9293 bnx2x_acquire_phy_lock(bp);
9294 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9295 (bp->state != BNX2X_STATE_CLOSED),
9296 phy_fw_ver, PHY_FW_VER_LEN);
9297 bnx2x_release_phy_lock(bp);
9298 }
9299
9300 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9301 (bp->common.bc_ver & 0xff0000) >> 16,
9302 (bp->common.bc_ver & 0xff00) >> 8,
9303 (bp->common.bc_ver & 0xff),
9304 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9305 strcpy(info->bus_info, pci_name(bp->pdev));
9306 info->n_stats = BNX2X_NUM_STATS;
9307 info->testinfo_len = BNX2X_NUM_TESTS;
9308 info->eedump_len = bp->common.flash_size;
9309 info->regdump_len = bnx2x_get_regs_len(dev);
9310}
9311
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009312static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9313{
9314 struct bnx2x *bp = netdev_priv(dev);
9315
9316 if (bp->flags & NO_WOL_FLAG) {
9317 wol->supported = 0;
9318 wol->wolopts = 0;
9319 } else {
9320 wol->supported = WAKE_MAGIC;
9321 if (bp->wol)
9322 wol->wolopts = WAKE_MAGIC;
9323 else
9324 wol->wolopts = 0;
9325 }
9326 memset(&wol->sopass, 0, sizeof(wol->sopass));
9327}
9328
9329static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9330{
9331 struct bnx2x *bp = netdev_priv(dev);
9332
9333 if (wol->wolopts & ~WAKE_MAGIC)
9334 return -EINVAL;
9335
9336 if (wol->wolopts & WAKE_MAGIC) {
9337 if (bp->flags & NO_WOL_FLAG)
9338 return -EINVAL;
9339
9340 bp->wol = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009341 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009342 bp->wol = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009343
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009344 return 0;
9345}
9346
/* ethtool_ops::get_msglevel - return the driver's debug message mask. */
static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}
9353
9354static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9355{
9356 struct bnx2x *bp = netdev_priv(dev);
9357
9358 if (capable(CAP_NET_ADMIN))
9359 bp->msglevel = level;
9360}
9361
9362static int bnx2x_nway_reset(struct net_device *dev)
9363{
9364 struct bnx2x *bp = netdev_priv(dev);
9365
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009366 if (!bp->port.pmf)
9367 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009368
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009369 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009370 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009371 bnx2x_link_set(bp);
9372 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009373
9374 return 0;
9375}
9376
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009377static u32 bnx2x_get_link(struct net_device *dev)
Naohiro Ooiwa01e53292009-06-30 12:44:19 -07009378{
9379 struct bnx2x *bp = netdev_priv(dev);
9380
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07009381 if (bp->flags & MF_FUNC_DIS)
9382 return 0;
9383
Naohiro Ooiwa01e53292009-06-30 12:44:19 -07009384 return bp->link_vars.link_up;
9385}
9386
/* ethtool_ops::get_eeprom_len - size of the NVRAM exposed as EEPROM. */
static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}
9393
/* Request the per-port NVRAM software-arbitration lock from the MCP.
 * Sets the request bit for this port, then polls for the grant bit.
 * Returns 0 on success or -EBUSY if the lock was not granted within
 * the (emulation-adjusted) timeout.
 */
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	/* poll for the grant bit */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
9424
/* Release the per-port NVRAM software-arbitration lock.
 * Sets the clear-request bit and polls until the grant bit for this
 * port drops.  Returns 0 on success or -EBUSY if the hardware still
 * reports the lock as held after the timeout.
 */
static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	/* poll until the grant bit is gone */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
9455
9456static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9457{
9458 u32 val;
9459
9460 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9461
9462 /* enable both bits, even on read */
9463 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9464 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9465 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9466}
9467
9468static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9469{
9470 u32 val;
9471
9472 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9473
9474 /* disable both bits, even after read */
9475 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9476 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9477 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9478}
9479
/* Issue a single dword NVRAM read at @offset.
 * @cmd_flags carries the FIRST/LAST sequencing bits; DOIT is ORed in
 * here.  On success the value is stored big-endian in *@ret_val (the
 * byte order ethtool callers expect) and 0 is returned; -EBUSY means
 * the controller never raised DONE within the timeout.
 */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
9524
/* Read @buf_size bytes from NVRAM at @offset into @ret_buf.
 * Offset and size must be dword aligned and the range must fit inside
 * the flash.  Holds the NVRAM HW lock for the whole transfer, issuing
 * dword reads with FIRST on the first and LAST on the final command.
 * Returns 0 on success or a negative errno.
 */
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		/* final dword closes the sequence with the LAST flag */
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9579
9580static int bnx2x_get_eeprom(struct net_device *dev,
9581 struct ethtool_eeprom *eeprom, u8 *eebuf)
9582{
9583 struct bnx2x *bp = netdev_priv(dev);
9584 int rc;
9585
Eilon Greenstein2add3ac2009-01-14 06:44:07 +00009586 if (!netif_running(dev))
9587 return -EAGAIN;
9588
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009589 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009590 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9591 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9592 eeprom->len, eeprom->len);
9593
9594 /* parameters already validated in ethtool_get_eeprom */
9595
9596 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9597
9598 return rc;
9599}
9600
/* Issue a single dword NVRAM write of @val at @offset.
 * @cmd_flags carries FIRST/LAST sequencing; DOIT and WR are ORed in
 * here.  Returns 0 once the controller raises DONE, -EBUSY on timeout.
 */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion; val is reused as a scratch status read */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
9640
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

/* Write a single byte to NVRAM via read-modify-write of the containing
 * dword: read the aligned dword, splice in the new byte, write it back.
 * Used for the ethtool single-byte EEPROM write path.
 * NOTE(review): val is __be32 yet is patched with plain shifts before
 * be32_to_cpu() - the round-trip mirrors the read path so it looks
 * self-consistent, but confirm behavior on big-endian before changing.
 */
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the dword containing the byte, as one FIRST|LAST command */
	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		/* patch the target byte within the dword */
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9688
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 * A single-byte write (the ethtool case) is delegated to
 * bnx2x_nvram_write1(); otherwise offset and size must be dword
 * aligned and inside the flash.  FIRST/LAST command flags are set per
 * NVRAM page boundary.  Returns 0 or a negative errno.
 */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* LAST on the final dword of the buffer or of a flash page;
		 * FIRST again at the start of each new page */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9749
/* ethtool_ops::set_eeprom - write NVRAM, or drive PHY FW upgrade steps.
 * Magic values in eeprom->magic multiplex special PHY-flash phases:
 *   0x50485950 ('PHYP') - take the link down to prepare for the upgrade
 *   0x50485952 ('PHYR') - re-initialize the link after the upgrade
 *   0x53985943 ('PHYC') - upgrade completed, reset the SFX7101 PHY
 * Any other magic performs a plain NVRAM write.
 * NOTE(review): 0x53985943 does not spell 'PHYC' in ASCII (that would
 * be 0x50485943) and it falls outside the 0x504859xx range covered by
 * the PMF-only guard below - looks like a typo; confirm against the
 * userspace PHY flash utility before changing the constant.
 */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		/* raise GPIO0 to put the SFX7101 into FW download mode */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
9824
/* ethtool_ops::get_coalesce - report interrupt coalescing settings.
 * Only the rx/tx usec timeouts are supported; every other field is 0. */
static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}
9837
Eilon Greensteinca003922009-08-12 22:53:28 -07009838#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009839static int bnx2x_set_coalesce(struct net_device *dev,
9840 struct ethtool_coalesce *coal)
9841{
9842 struct bnx2x *bp = netdev_priv(dev);
9843
9844 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -07009845 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9846 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009847
9848 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -07009849 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9850 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009851
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009852 if (netif_running(dev))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009853 bnx2x_update_coalesce(bp);
9854
9855 return 0;
9856}
9857
9858static void bnx2x_get_ringparam(struct net_device *dev,
9859 struct ethtool_ringparam *ering)
9860{
9861 struct bnx2x *bp = netdev_priv(dev);
9862
9863 ering->rx_max_pending = MAX_RX_AVAIL;
9864 ering->rx_mini_max_pending = 0;
9865 ering->rx_jumbo_max_pending = 0;
9866
9867 ering->rx_pending = bp->rx_ring_size;
9868 ering->rx_mini_pending = 0;
9869 ering->rx_jumbo_pending = 0;
9870
9871 ering->tx_max_pending = MAX_TX_AVAIL;
9872 ering->tx_pending = bp->tx_ring_size;
9873}
9874
9875static int bnx2x_set_ringparam(struct net_device *dev,
9876 struct ethtool_ringparam *ering)
9877{
9878 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009879 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009880
9881 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9882 (ering->tx_pending > MAX_TX_AVAIL) ||
9883 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9884 return -EINVAL;
9885
9886 bp->rx_ring_size = ering->rx_pending;
9887 bp->tx_ring_size = ering->tx_pending;
9888
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009889 if (netif_running(dev)) {
9890 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9891 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009892 }
9893
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009894 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009895}
9896
/* ethtool_ops::get_pauseparam - report flow-control configuration.
 * autoneg is reported only when both the requested flow control and
 * the requested line speed are set to auto-negotiate; rx/tx pause
 * reflect the currently resolved link state.
 */
static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}
9915
/* ethtool_ops::set_pauseparam - configure flow control.
 * A no-op (returns 0) in E1H multi-function mode, where this function
 * does not own the port link parameters.  The new setting is applied
 * to the hardware immediately if the interface is running.
 */
static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	/* rebuild the requested flow control from the rx/tx flags */
	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	/* neither rx nor tx requested -> no flow control at all */
	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		/* with an autoneg line speed, let autoneg resolve pause */
		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
9959
/* ethtool_ops::set_flags - toggle LRO (hardware TPA).
 * TPA is only enabled while Rx checksum offload is on, since the
 * aggregated packets rely on hardware checksumming.  Toggling TPA on
 * a running interface requires a NIC reload to rebuild the rings.
 */
static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	/* reload so the rings are (re)built for the new TPA mode */
	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
9987
/* ethtool_ops::get_rx_csum - report Rx checksum offload state. */
static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}
9994
/* ethtool_ops::set_rx_csum - enable/disable Rx checksum offload.
 * Disabling Rx CSUM also forces LRO (TPA) off via bnx2x_set_flags(),
 * since TPA'ed packets would otherwise be dropped for carrying a
 * "wrong" TCP checksum.
 */
static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}
10012
10013static int bnx2x_set_tso(struct net_device *dev, u32 data)
10014{
Eilon Greenstein755735e2008-06-23 20:35:13 -070010015 if (data) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010016 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735e2008-06-23 20:35:13 -070010017 dev->features |= NETIF_F_TSO6;
10018 } else {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010019 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735e2008-06-23 20:35:13 -070010020 dev->features &= ~NETIF_F_TSO6;
10021 }
10022
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010023 return 0;
10024}
10025
/* User-visible names of the BNX2X_NUM_TESTS ethtool self tests; the
 * "(offline)" entries require the interface to be taken down to run. */
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
10037
/* Offline register self-test.
 *
 * For every entry in reg_tbl, write a test pattern (pass 0: all zeros,
 * pass 1: all ones), read the register back, restore the saved original
 * value, and verify that the bits selected by the entry's mask held the
 * pattern.  Each entry is { base offset, per-port stride, writable-bits
 * mask }; the register actually tested is offset0 + port * offset1.
 *
 * Returns 0 on success, -ENODEV if the interface is down or any
 * register failed to hold the pattern.
 */
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;	/* base register address */
		u32 offset1;	/* stride between port 0 and port 1 copies */
		u32 mask;	/* bits expected to be read/write */
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		/* sentinel terminating the table */
		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
10130
/* Offline memory self-test.
 *
 * Read every 32-bit word of each internal memory listed in mem_tbl,
 * then check the block parity status registers: any bits set outside
 * the per-chip "expected" masks (e1_mask for 57710, e1h_mask for
 * 57711) indicate a parity error.
 *
 * Returns 0 on success, -ENODEV if the interface is down or an
 * unexpected parity bit is latched.
 */
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;	/* first word of the memory */
		int size;	/* number of 32-bit words to read */
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }	/* sentinel */
	};
	static const struct {
		char *name;
		u32 offset;	/* parity status register */
		u32 e1_mask;	/* bits tolerated on E1 (57710) */
		u32 e1h_mask;	/* bits tolerated on E1H (57711) */
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }	/* sentinel */
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
10189
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010190static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10191{
10192 int cnt = 1000;
10193
10194 if (link_up)
10195 while (bnx2x_link_test(bp) && cnt--)
10196 msleep(10);
10197}
10198
/* Run one loopback iteration: build a single self-addressed packet,
 * push it through queue 0's Tx ring with the requested loopback path
 * (PHY or MAC) configured, and verify it arrives back on queue 0's Rx
 * ring with the expected length and payload.
 *
 * Called with the interface quiesced and the PHY lock held (see
 * bnx2x_test_loopback).  Returns 0 on success, -EINVAL for a bad mode,
 * -ENOMEM on skb allocation failure, -ENODEV on any data-path failure.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	/* both directions use fastpath queue 0 */
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		/* PHY loopback relies on the mode set up by LOAD_DIAG;
		   anything else cannot be tested here */
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet: dst MAC = own MAC, src MAC = 0,
	   0x77 filler for the rest of the header, then an incrementing
	   byte pattern that is verified on receive */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	/* snapshot both consumer indices so completion can be detected */
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	/* fill the start BD for a single-fragment packet */
	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	/* make sure the BDs are written before ringing the doorbell */
	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	/* give the chip time to loop the packet back */
	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* inspect the completion: must be a fast-path CQE with no error */
	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* verify the payload byte pattern survived the round trip */
	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* consume the ring entries used by the test packet */
	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
10333
10334static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10335{
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010336 int rc = 0, res;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010337
10338 if (!netif_running(bp->dev))
10339 return BNX2X_LOOPBACK_FAILED;
10340
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070010341 bnx2x_netif_stop(bp, 1);
Eilon Greenstein3910c8a2009-01-22 06:01:32 +000010342 bnx2x_acquire_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010343
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010344 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10345 if (res) {
10346 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10347 rc |= BNX2X_PHY_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010348 }
10349
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010350 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10351 if (res) {
10352 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10353 rc |= BNX2X_MAC_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010354 }
10355
Eilon Greenstein3910c8a2009-01-22 06:01:32 +000010356 bnx2x_release_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010357 bnx2x_netif_start(bp);
10358
10359 return rc;
10360}
10361
/* CRC-32 residue: the value ether_crc_le() yields over a buffer whose
 * data is followed by its own (inverted, little-endian) CRC */
#define CRC32_RESIDUAL			0xdebb20e3

/* Online NVRAM self-test.
 *
 * Verify the NVRAM magic word, then read each region listed in
 * nvram_tbl and check that its CRC-32 residue matches CRC32_RESIDUAL
 * (i.e. the region is internally consistent with its stored CRC).
 *
 * Returns 0 on success, a read error code, or -ENODEV on bad
 * magic/CRC.
 */
static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;	/* byte offset within NVRAM */
		int size;	/* region length, incl. trailing CRC */
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }  /* sentinel: size 0 ends the table */
	};
	/* sized for the largest region (manuf_info, 0x350 bytes) */
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
10420
/* Online interrupt self-test.
 *
 * Post an empty (length 0) SET_MAC ramrod on the slowpath and poll up
 * to ~100ms for set_mac_pending to be cleared, which signals that the
 * ramrod completion came back.  Returns 0 on success, -ENODEV if the
 * interface is down, the post failed, or the completion never arrived.
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* empty command: no MAC entries, header only */
	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* raise the pending flag before posting so the completion
	   handler cannot miss it */
	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		/* poll up to 10 x 10ms for the completion */
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
10455
/* ethtool self_test handler.
 *
 * Fills buf[] (one u64 per entry of bnx2x_tests_str_arr) with per-test
 * results and sets ETH_TEST_FL_FAILED on any failure.  Offline tests
 * (registers, memory, loopback) require reloading the NIC in
 * diagnostic mode and are skipped in E1H multi-function mode; online
 * tests (nvram, interrupt, link) always run.
 */
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		/* remember the link state so it can be awaited after
		   each reload */
		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* buf[2] carries the loopback failure bitmask itself */
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* only the port-management function owns the link */
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
10525
/* Per-queue ethtool statistics layout: offset is the 32-bit word
 * offset into struct eth_q_stats (via Q_STATS_OFFSET32), size selects
 * a 4-byte or 8-byte (hi/lo pair) counter, and string is a printf
 * template taking the queue index.
 */
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};
10551
/* Device-wide ethtool statistics layout: offset is the 32-bit word
 * offset into bp->eth_stats (via STATS_OFFSET32), size selects a
 * 4-byte or 8-byte (hi/lo pair) counter, and flags mark a counter as
 * per-port, per-function, or both (controls visibility in E1H
 * multi-function mode).
 */
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
10645
/* a stat is port-only when its flags carry PORT but not FUNC */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
/* a stat is per-function when the FUNC flag is set (alone or BOTH) */
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
/* in E1H multi-function mode only per-function stats are exposed,
 * unless the BNX2X_MSG_STATS debug bit is set in the message level */
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
Yitchak Gertner66e855f2008-08-13 15:49:05 -070010651
Ben Hutchings15f0a392009-10-01 11:58:24 +000010652static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10653{
10654 struct bnx2x *bp = netdev_priv(dev);
10655 int i, num_stats;
10656
10657 switch(stringset) {
10658 case ETH_SS_STATS:
10659 if (is_multi(bp)) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010660 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
Ben Hutchings15f0a392009-10-01 11:58:24 +000010661 if (!IS_E1HMF_MODE_STAT(bp))
10662 num_stats += BNX2X_NUM_STATS;
10663 } else {
10664 if (IS_E1HMF_MODE_STAT(bp)) {
10665 num_stats = 0;
10666 for (i = 0; i < BNX2X_NUM_STATS; i++)
10667 if (IS_FUNC_STAT(i))
10668 num_stats++;
10669 } else
10670 num_stats = BNX2X_NUM_STATS;
10671 }
10672 return num_stats;
10673
10674 case ETH_SS_TEST:
10675 return BNX2X_NUM_TESTS;
10676
10677 default:
10678 return -EINVAL;
10679 }
10680}
10681
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010682static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10683{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010684 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +000010685 int i, j, k;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010686
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010687 switch (stringset) {
10688 case ETH_SS_STATS:
Eilon Greensteinde832a52009-02-12 08:36:33 +000010689 if (is_multi(bp)) {
10690 k = 0;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010691 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +000010692 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10693 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10694 bnx2x_q_stats_arr[j].string, i);
10695 k += BNX2X_NUM_Q_STATS;
10696 }
10697 if (IS_E1HMF_MODE_STAT(bp))
10698 break;
10699 for (j = 0; j < BNX2X_NUM_STATS; j++)
10700 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10701 bnx2x_stats_arr[j].string);
10702 } else {
10703 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10704 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10705 continue;
10706 strcpy(buf + j*ETH_GSTRING_LEN,
10707 bnx2x_stats_arr[i].string);
10708 j++;
10709 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010710 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010711 break;
10712
10713 case ETH_SS_TEST:
10714 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10715 break;
10716 }
10717}
10718
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010719static void bnx2x_get_ethtool_stats(struct net_device *dev,
10720 struct ethtool_stats *stats, u64 *buf)
10721{
10722 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +000010723 u32 *hw_stats, *offset;
10724 int i, j, k;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010725
Eilon Greensteinde832a52009-02-12 08:36:33 +000010726 if (is_multi(bp)) {
10727 k = 0;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010728 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +000010729 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10730 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10731 if (bnx2x_q_stats_arr[j].size == 0) {
10732 /* skip this counter */
10733 buf[k + j] = 0;
10734 continue;
10735 }
10736 offset = (hw_stats +
10737 bnx2x_q_stats_arr[j].offset);
10738 if (bnx2x_q_stats_arr[j].size == 4) {
10739 /* 4-byte counter */
10740 buf[k + j] = (u64) *offset;
10741 continue;
10742 }
10743 /* 8-byte counter */
10744 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10745 }
10746 k += BNX2X_NUM_Q_STATS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010747 }
Eilon Greensteinde832a52009-02-12 08:36:33 +000010748 if (IS_E1HMF_MODE_STAT(bp))
10749 return;
10750 hw_stats = (u32 *)&bp->eth_stats;
10751 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10752 if (bnx2x_stats_arr[j].size == 0) {
10753 /* skip this counter */
10754 buf[k + j] = 0;
10755 continue;
10756 }
10757 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10758 if (bnx2x_stats_arr[j].size == 4) {
10759 /* 4-byte counter */
10760 buf[k + j] = (u64) *offset;
10761 continue;
10762 }
10763 /* 8-byte counter */
10764 buf[k + j] = HILO_U64(*offset, *(offset + 1));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010765 }
Eilon Greensteinde832a52009-02-12 08:36:33 +000010766 } else {
10767 hw_stats = (u32 *)&bp->eth_stats;
10768 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10769 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10770 continue;
10771 if (bnx2x_stats_arr[i].size == 0) {
10772 /* skip this counter */
10773 buf[j] = 0;
10774 j++;
10775 continue;
10776 }
10777 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10778 if (bnx2x_stats_arr[i].size == 4) {
10779 /* 4-byte counter */
10780 buf[j] = (u64) *offset;
10781 j++;
10782 continue;
10783 }
10784 /* 8-byte counter */
10785 buf[j] = HILO_U64(*offset, *(offset + 1));
10786 j++;
10787 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010788 }
10789}
10790
10791static int bnx2x_phys_id(struct net_device *dev, u32 data)
10792{
10793 struct bnx2x *bp = netdev_priv(dev);
10794 int i;
10795
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010796 if (!netif_running(dev))
10797 return 0;
10798
10799 if (!bp->port.pmf)
10800 return 0;
10801
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010802 if (data == 0)
10803 data = 2;
10804
10805 for (i = 0; i < (data * 2); i++) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010806 if ((i % 2) == 0)
Yaniv Rosner7846e472009-11-05 19:18:07 +020010807 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10808 SPEED_1000);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010809 else
Yaniv Rosner7846e472009-11-05 19:18:07 +020010810 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010811
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010812 msleep_interruptible(500);
10813 if (signal_pending(current))
10814 break;
10815 }
10816
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010817 if (bp->link_vars.link_up)
Yaniv Rosner7846e472009-11-05 19:18:07 +020010818 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10819 bp->link_vars.line_speed);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010820
10821 return 0;
10822}
10823
/* ethtool operations vector registered for every bnx2x net_device;
 * entries not set here fall back to the ethtool core defaults */
static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	/* Tx checksum / SG / TSO use the generic ethtool_op_* helpers
	 * where no device-specific handling is needed */
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};
10861
10862/* end of ethtool_ops */
10863
10864/****************************************************************************
10865* General service functions
10866****************************************************************************/
10867
10868static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10869{
10870 u16 pmcsr;
10871
10872 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10873
10874 switch (state) {
10875 case PCI_D0:
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010876 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010877 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10878 PCI_PM_CTRL_PME_STATUS));
10879
10880 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
Eilon Greenstein33471622008-08-13 15:59:08 -070010881 /* delay required during transition out of D3hot */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010882 msleep(20);
10883 break;
10884
10885 case PCI_D3hot:
10886 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10887 pmcsr |= 3;
10888
10889 if (bp->wol)
10890 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10891
10892 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10893 pmcsr);
10894
10895 /* No more memory access after this point until
10896 * device is brought back to D0.
10897 */
10898 break;
10899
10900 default:
10901 return -EINVAL;
10902 }
10903 return 0;
10904}
10905
Eilon Greenstein237907c2009-01-14 06:42:44 +000010906static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10907{
10908 u16 rx_cons_sb;
10909
10910 /* Tell compiler that status block fields can change */
10911 barrier();
10912 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10913 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10914 rx_cons_sb++;
10915 return (fp->rx_comp_cons != rx_cons_sb);
10916}
10917
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010918/*
10919 * net_device service functions
10920 */
10921
/* NAPI poll callback: services both the Tx completion and the Rx ring of
 * one fastpath, and re-enables the queue's IGU interrupt only after
 * verifying (with careful memory ordering) that no work remains.
 * Returns the number of Rx packets processed, at most @budget.
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, thus we need
			 * to ensure that status block indices have been actually read
			 * (bnx2x_update_fpsb_idx) prior to this check
			 * (bnx2x_has_rx_work) so that we won't write the "newer"
			 * value of the status block to IGU (if there was a DMA right
			 * after bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed to right
			 * before bnx2x_ack_sb). In this case there will never be
			 * another interrupt until there is another update of the
			 * status block, while there is still unhandled work.
			 */
			rmb();

			/* re-check after the barrier; only now is it safe to
			 * complete NAPI and unmask the interrupt */
			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
10980
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 *
 * Shrinks the start BD *tx_bd to cover only the first @hlen header
 * bytes, chains a new data BD (pointing @hlen bytes into the same DMA
 * mapping) for the remainder, updates *tx_bd to the new BD and returns
 * the advanced BD-ring producer index.
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	/* the data BD reuses the header's DMA mapping, offset by hlen */
	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
11031
11032static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11033{
11034 if (fix > 0)
11035 csum = (u16) ~csum_fold(csum_sub(csum,
11036 csum_partial(t_header - fix, fix, 0)));
11037
11038 else if (fix < 0)
11039 csum = (u16) ~csum_fold(csum_add(csum,
11040 csum_partial(t_header, -fix, 0)));
11041
11042 return swab16(csum);
11043}
11044
11045static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11046{
11047 u32 rc;
11048
11049 if (skb->ip_summed != CHECKSUM_PARTIAL)
11050 rc = XMIT_PLAIN;
11051
11052 else {
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000011053 if (skb->protocol == htons(ETH_P_IPV6)) {
Eilon Greenstein755735e2008-06-23 20:35:13 -070011054 rc = XMIT_CSUM_V6;
11055 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11056 rc |= XMIT_CSUM_TCP;
11057
11058 } else {
11059 rc = XMIT_CSUM_V4;
11060 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11061 rc |= XMIT_CSUM_TCP;
11062 }
11063 }
11064
11065 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
Eilon Greensteind6a2f982009-11-09 06:09:22 +000011066 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011067
11068 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
Eilon Greensteind6a2f982009-11-09 06:09:22 +000011069 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011070
11071 return rc;
11072}
11073
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions)
   Returns 1 when the skb must be linearized before transmission:
   for LSO, every sliding window of (MAX_FETCH_BD - 3) fragments must
   contain at least one MSS worth of data; non-LSO packets with too
   many fragments must always be linearized. */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
Eilon Greenstein755735e2008-06-23 20:35:13 -070011154
11155/* called with netif_tx_lock
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011156 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
Eilon Greenstein755735e2008-06-23 20:35:13 -070011157 * netif_wake_queue()
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011158 */
Stephen Hemminger613573252009-08-31 19:50:58 +000011159static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011160{
11161 struct bnx2x *bp = netdev_priv(dev);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011162 struct bnx2x_fastpath *fp;
Eilon Greenstein555f6c72009-02-12 08:36:11 +000011163 struct netdev_queue *txq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011164 struct sw_tx_bd *tx_buf;
Eilon Greensteinca003922009-08-12 22:53:28 -070011165 struct eth_tx_start_bd *tx_start_bd;
11166 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011167 struct eth_tx_parse_bd *pbd = NULL;
11168 u16 pkt_prod, bd_prod;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011169 int nbd, fp_index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011170 dma_addr_t mapping;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011171 u32 xmit_type = bnx2x_xmit_type(bp, skb);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011172 int i;
11173 u8 hlen = 0;
Eilon Greensteinca003922009-08-12 22:53:28 -070011174 __le16 pkt_size = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011175
11176#ifdef BNX2X_STOP_ON_ERROR
11177 if (unlikely(bp->panic))
11178 return NETDEV_TX_BUSY;
11179#endif
11180
Eilon Greenstein555f6c72009-02-12 08:36:11 +000011181 fp_index = skb_get_queue_mapping(skb);
11182 txq = netdev_get_tx_queue(dev, fp_index);
11183
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011184 fp = &bp->fp[fp_index];
Eilon Greenstein755735e2008-06-23 20:35:13 -070011185
Yitchak Gertner231fd582008-08-25 15:27:06 -070011186 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011187 fp->eth_q_stats.driver_xoff++;
Eilon Greenstein555f6c72009-02-12 08:36:11 +000011188 netif_tx_stop_queue(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011189 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11190 return NETDEV_TX_BUSY;
11191 }
11192
Eilon Greenstein755735e2008-06-23 20:35:13 -070011193 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11194 " gso type %x xmit_type %x\n",
11195 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11196 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11197
Eilon Greenstein632da4d2009-01-14 06:44:10 +000011198#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
Eilon Greensteinf5372252009-02-12 08:38:30 +000011199 /* First, check if we need to linearize the skb (due to FW
11200 restrictions). No need to check fragmentation if page size > 8K
11201 (there will be no violation to FW restrictions) */
Eilon Greenstein755735e2008-06-23 20:35:13 -070011202 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11203 /* Statistics of linearization */
11204 bp->lin_cnt++;
11205 if (skb_linearize(skb) != 0) {
11206 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11207 "silently dropping this SKB\n");
11208 dev_kfree_skb_any(skb);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -070011209 return NETDEV_TX_OK;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011210 }
11211 }
Eilon Greenstein632da4d2009-01-14 06:44:10 +000011212#endif
Eilon Greenstein755735e2008-06-23 20:35:13 -070011213
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011214 /*
Eilon Greenstein755735e2008-06-23 20:35:13 -070011215 Please read carefully. First we use one BD which we mark as start,
Eilon Greensteinca003922009-08-12 22:53:28 -070011216 then we have a parsing info BD (used for TSO or xsum),
Eilon Greenstein755735e2008-06-23 20:35:13 -070011217 and only then we have the rest of the TSO BDs.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011218 (don't forget to mark the last one as last,
11219 and to unmap only AFTER you write to the BD ...)
Eilon Greenstein755735e2008-06-23 20:35:13 -070011220 And above all, all pdb sizes are in words - NOT DWORDS!
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011221 */
11222
11223 pkt_prod = fp->tx_pkt_prod++;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011224 bd_prod = TX_BD(fp->tx_bd_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011225
Eilon Greenstein755735e2008-06-23 20:35:13 -070011226 /* get a tx_buf and first BD */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011227 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
Eilon Greensteinca003922009-08-12 22:53:28 -070011228 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011229
Eilon Greensteinca003922009-08-12 22:53:28 -070011230 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11231 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11232 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
Eilon Greenstein3196a882008-08-13 15:58:49 -070011233 /* header nbd */
Eilon Greensteinca003922009-08-12 22:53:28 -070011234 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011235
Eilon Greenstein755735e2008-06-23 20:35:13 -070011236 /* remember the first BD of the packet */
11237 tx_buf->first_bd = fp->tx_bd_prod;
11238 tx_buf->skb = skb;
Eilon Greensteinca003922009-08-12 22:53:28 -070011239 tx_buf->flags = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011240
11241 DP(NETIF_MSG_TX_QUEUED,
11242 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Eilon Greensteinca003922009-08-12 22:53:28 -070011243 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011244
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080011245#ifdef BCM_VLAN
11246 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11247 (bp->flags & HW_VLAN_TX_FLAG)) {
Eilon Greensteinca003922009-08-12 22:53:28 -070011248 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11249 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011250 } else
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080011251#endif
Eilon Greensteinca003922009-08-12 22:53:28 -070011252 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011253
Eilon Greensteinca003922009-08-12 22:53:28 -070011254 /* turn on parsing and get a BD */
11255 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11256 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011257
Eilon Greensteinca003922009-08-12 22:53:28 -070011258 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
Eilon Greenstein755735e2008-06-23 20:35:13 -070011259
11260 if (xmit_type & XMIT_CSUM) {
Eilon Greensteinca003922009-08-12 22:53:28 -070011261 hlen = (skb_network_header(skb) - skb->data) / 2;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011262
11263 /* for now NS flag is not used in Linux */
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000011264 pbd->global_data =
11265 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11266 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
Eilon Greenstein755735e2008-06-23 20:35:13 -070011267
11268 pbd->ip_hlen = (skb_transport_header(skb) -
11269 skb_network_header(skb)) / 2;
11270
11271 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11272
11273 pbd->total_hlen = cpu_to_le16(hlen);
Eilon Greensteinca003922009-08-12 22:53:28 -070011274 hlen = hlen*2;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011275
Eilon Greensteinca003922009-08-12 22:53:28 -070011276 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011277
11278 if (xmit_type & XMIT_CSUM_V4)
Eilon Greensteinca003922009-08-12 22:53:28 -070011279 tx_start_bd->bd_flags.as_bitfield |=
Eilon Greenstein755735e2008-06-23 20:35:13 -070011280 ETH_TX_BD_FLAGS_IP_CSUM;
11281 else
Eilon Greensteinca003922009-08-12 22:53:28 -070011282 tx_start_bd->bd_flags.as_bitfield |=
11283 ETH_TX_BD_FLAGS_IPV6;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011284
11285 if (xmit_type & XMIT_CSUM_TCP) {
11286 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11287
11288 } else {
11289 s8 fix = SKB_CS_OFF(skb); /* signed! */
11290
Eilon Greensteinca003922009-08-12 22:53:28 -070011291 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011292
11293 DP(NETIF_MSG_TX_QUEUED,
Eilon Greensteinca003922009-08-12 22:53:28 -070011294 "hlen %d fix %d csum before fix %x\n",
11295 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
Eilon Greenstein755735e2008-06-23 20:35:13 -070011296
11297 /* HW bug: fixup the CSUM */
11298 pbd->tcp_pseudo_csum =
11299 bnx2x_csum_fix(skb_transport_header(skb),
11300 SKB_CS(skb), fix);
11301
11302 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11303 pbd->tcp_pseudo_csum);
11304 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011305 }
11306
11307 mapping = pci_map_single(bp->pdev, skb->data,
Eilon Greenstein755735e2008-06-23 20:35:13 -070011308 skb_headlen(skb), PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011309
Eilon Greensteinca003922009-08-12 22:53:28 -070011310 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11311 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11312 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11313 tx_start_bd->nbd = cpu_to_le16(nbd);
11314 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11315 pkt_size = tx_start_bd->nbytes;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011316
11317 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
Eilon Greenstein755735e2008-06-23 20:35:13 -070011318 " nbytes %d flags %x vlan %x\n",
Eilon Greensteinca003922009-08-12 22:53:28 -070011319 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11320 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11321 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011322
Eilon Greenstein755735e2008-06-23 20:35:13 -070011323 if (xmit_type & XMIT_GSO) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011324
11325 DP(NETIF_MSG_TX_QUEUED,
11326 "TSO packet len %d hlen %d total len %d tso size %d\n",
11327 skb->len, hlen, skb_headlen(skb),
11328 skb_shinfo(skb)->gso_size);
11329
Eilon Greensteinca003922009-08-12 22:53:28 -070011330 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011331
Eilon Greenstein755735e2008-06-23 20:35:13 -070011332 if (unlikely(skb_headlen(skb) > hlen))
Eilon Greensteinca003922009-08-12 22:53:28 -070011333 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11334 hlen, bd_prod, ++nbd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011335
11336 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11337 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011338 pbd->tcp_flags = pbd_tcp_flags(skb);
11339
11340 if (xmit_type & XMIT_GSO_V4) {
11341 pbd->ip_id = swab16(ip_hdr(skb)->id);
11342 pbd->tcp_pseudo_csum =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011343 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11344 ip_hdr(skb)->daddr,
11345 0, IPPROTO_TCP, 0));
Eilon Greenstein755735e2008-06-23 20:35:13 -070011346
11347 } else
11348 pbd->tcp_pseudo_csum =
11349 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11350 &ipv6_hdr(skb)->daddr,
11351 0, IPPROTO_TCP, 0));
11352
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011353 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11354 }
Eilon Greensteinca003922009-08-12 22:53:28 -070011355 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011356
Eilon Greenstein755735e2008-06-23 20:35:13 -070011357 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11358 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011359
Eilon Greenstein755735e2008-06-23 20:35:13 -070011360 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Eilon Greensteinca003922009-08-12 22:53:28 -070011361 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11362 if (total_pkt_bd == NULL)
11363 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011364
Eilon Greenstein755735e2008-06-23 20:35:13 -070011365 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11366 frag->size, PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011367
Eilon Greensteinca003922009-08-12 22:53:28 -070011368 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11369 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11370 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11371 le16_add_cpu(&pkt_size, frag->size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011372
Eilon Greenstein755735e2008-06-23 20:35:13 -070011373 DP(NETIF_MSG_TX_QUEUED,
Eilon Greensteinca003922009-08-12 22:53:28 -070011374 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11375 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11376 le16_to_cpu(tx_data_bd->nbytes));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011377 }
11378
Eilon Greensteinca003922009-08-12 22:53:28 -070011379 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011380
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011381 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11382
Eilon Greenstein755735e2008-06-23 20:35:13 -070011383 /* now send a tx doorbell, counting the next BD
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011384 * if the packet contains or ends with it
11385 */
11386 if (TX_BD_POFF(bd_prod) < nbd)
11387 nbd++;
11388
Eilon Greensteinca003922009-08-12 22:53:28 -070011389 if (total_pkt_bd != NULL)
11390 total_pkt_bd->total_pkt_bytes = pkt_size;
11391
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011392 if (pbd)
11393 DP(NETIF_MSG_TX_QUEUED,
11394 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11395 " tcp_flags %x xsum %x seq %u hlen %u\n",
11396 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11397 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
Eilon Greenstein755735e2008-06-23 20:35:13 -070011398 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011399
Eilon Greenstein755735e2008-06-23 20:35:13 -070011400 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011401
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -080011402 /*
11403 * Make sure that the BD data is updated before updating the producer
11404 * since FW might read the BD right after the producer is updated.
11405 * This is only applicable for weak-ordered memory model archs such
11406 * as IA-64. The following barrier is also mandatory since FW will
11407 * assumes packets must have BDs.
11408 */
11409 wmb();
11410
Eilon Greensteinca003922009-08-12 22:53:28 -070011411 fp->tx_db.data.prod += nbd;
11412 barrier();
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011413 DOORBELL(bp, fp->index, fp->tx_db.raw);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011414
11415 mmiowb();
11416
Eilon Greenstein755735e2008-06-23 20:35:13 -070011417 fp->tx_bd_prod += nbd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011418
11419 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
Eilon Greensteinca003922009-08-12 22:53:28 -070011420 netif_tx_stop_queue(txq);
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -080011421 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11422 if we put Tx into XOFF state. */
11423 smp_mb();
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011424 fp->eth_q_stats.driver_xoff++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011425 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
Eilon Greenstein555f6c72009-02-12 08:36:11 +000011426 netif_tx_wake_queue(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011427 }
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011428 fp->tx_pkt++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011429
11430 return NETDEV_TX_OK;
11431}
11432
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* no link yet - the carrier is turned on later by the link-up event */
	netif_carrier_off(dev);

	/* bring the chip to full power before touching it */
	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
11444
/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	/* Power down only when we are the last user of this PCI device
	 * (enable_cnt == 1) and not on slow emulation/FPGA platforms */
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
11458
/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* rx filters can only be programmed on a fully initialized device */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	/* E1 has a limited CAM - fall back to all-multi when the multicast
	 * list does not fit */
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			/* E1: program each multicast address into the MAC
			 * CAM through a SET_MAC ramrod */
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < netdev_mc_count(dev));
			     i++, mclist = mclist->next) {

				/* CAM stores the MAC as three
				 * byte-swapped 16-bit words */
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			/* invalidate any stale entries left over from a
			 * previous, longer multicast list */
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
					    config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			/* mark a MAC ramrod as pending before posting it;
			 * smp_wmb() orders the flag vs. the ramrod data */
			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts: hash each address
			 * into the MC hash filter registers */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < netdev_mc_count(dev));
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				/* top CRC byte selects one of 256 filter
				 * bits: regidx = word, bit = bit-in-word */
				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
11583
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	/* reject multicast and all-zero addresses */
	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	/* if the interface is up, program the new MAC into the chip;
	 * E1 and E1H use different CAM programming paths */
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}
11603
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011604/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011605static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11606 int devad, u16 addr)
11607{
11608 struct bnx2x *bp = netdev_priv(netdev);
11609 u16 value;
11610 int rc;
11611 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11612
11613 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11614 prtad, devad, addr);
11615
11616 if (prtad != bp->mdio.prtad) {
11617 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11618 prtad, bp->mdio.prtad);
11619 return -EINVAL;
11620 }
11621
11622 /* The HW expects different devad if CL22 is used */
11623 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11624
11625 bnx2x_acquire_phy_lock(bp);
11626 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11627 devad, addr, &value);
11628 bnx2x_release_phy_lock(bp);
11629 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11630
11631 if (!rc)
11632 rc = value;
11633 return rc;
11634}
11635
11636/* called with rtnl_lock */
11637static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11638 u16 addr, u16 value)
11639{
11640 struct bnx2x *bp = netdev_priv(netdev);
11641 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11642 int rc;
11643
11644 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11645 " value 0x%x\n", prtad, devad, addr, value);
11646
11647 if (prtad != bp->mdio.prtad) {
11648 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11649 prtad, bp->mdio.prtad);
11650 return -EINVAL;
11651 }
11652
11653 /* The HW expects different devad if CL22 is used */
11654 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11655
11656 bnx2x_acquire_phy_lock(bp);
11657 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11658 devad, addr, value);
11659 bnx2x_release_phy_lock(bp);
11660 return rc;
11661}
11662
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	/* MDIO access requires a running (powered-up) device */
	if (!netif_running(dev))
		return -EAGAIN;

	/* delegate the SIOC{G,S}MIIxxx handling to the mdio library */
	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
11677
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	/* reject MTUs outside the supported Ethernet frame size range */
	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	/* a running NIC must be reloaded so the rx buffers are re-sized */
	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
11701
/* netdev watchdog callback - invoked when a Tx queue appears stalled */
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
11713
11714#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	/* push the new VLAN stripping configuration to the chip if it is up */
	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011735
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011736#endif
11737
Alexey Dobriyan257ddbd2010-01-27 10:17:41 +000011738#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point - run the interrupt handler with the IRQ line
 * masked so it is safe to call from atomic netpoll context */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
11747#endif
11748
/* net_device entry points; slow-path ops are serialized by rtnl_lock */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
11766
/* One-time PCI/netdev setup for a single function of the device:
 * enables the PCI device, claims BARs, sets DMA masks, maps the
 * register and doorbell windows and fills in the netdev callbacks.
 * Returns 0 on success or a negative errno (all partial setup is
 * unwound through the err_out_* labels).
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 = registers, BAR2 = doorbells; both must be memory BARs */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* regions are requested only by the first function that enables
	 * the device; other functions share them (enable_cnt > 1) */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* prefer 64-bit DMA; fall back to 32-bit if unsupported */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* map at most BNX2X_DB_SIZE of the doorbell BAR */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	/* mirror the offload capabilities for VLAN devices */
	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
11930
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011931static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11932 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -080011933{
11934 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11935
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011936 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11937
11938 /* return value of 1=2.5GHz 2=5GHz */
11939 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -080011940}
11941
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011942static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11943{
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011944 const struct firmware *firmware = bp->firmware;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011945 struct bnx2x_fw_file_hdr *fw_hdr;
11946 struct bnx2x_fw_file_section *sections;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011947 u32 offset, len, num_ops;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011948 u16 *ops_offsets;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011949 int i;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011950 const u8 *fw_ver;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011951
11952 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11953 return -EINVAL;
11954
11955 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11956 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11957
11958 /* Make sure none of the offsets and sizes make us read beyond
11959 * the end of the firmware data */
11960 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11961 offset = be32_to_cpu(sections[i].offset);
11962 len = be32_to_cpu(sections[i].len);
11963 if (offset + len > firmware->size) {
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011964 printk(KERN_ERR PFX "Section %d length is out of "
11965 "bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011966 return -EINVAL;
11967 }
11968 }
11969
11970 /* Likewise for the init_ops offsets */
11971 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11972 ops_offsets = (u16 *)(firmware->data + offset);
11973 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11974
11975 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11976 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011977 printk(KERN_ERR PFX "Section offset %d is out of "
11978 "bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011979 return -EINVAL;
11980 }
11981 }
11982
11983 /* Check FW version */
11984 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11985 fw_ver = firmware->data + offset;
11986 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11987 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11988 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11989 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11990 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11991 " Should be %d.%d.%d.%d\n",
11992 fw_ver[0], fw_ver[1], fw_ver[2],
11993 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11994 BCM_5710_FW_MINOR_VERSION,
11995 BCM_5710_FW_REVISION_VERSION,
11996 BCM_5710_FW_ENGINEERING_VERSION);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000011997 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011998 }
11999
12000 return 0;
12001}
12002
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012003static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012004{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012005 const __be32 *source = (const __be32 *)_source;
12006 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012007 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012008
12009 for (i = 0; i < n/4; i++)
12010 target[i] = be32_to_cpu(source[i]);
12011}
12012
12013/*
12014 Ops array is stored in the following format:
12015 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12016 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012017static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012018{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012019 const __be32 *source = (const __be32 *)_source;
12020 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012021 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012022
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012023 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012024 tmp = be32_to_cpu(source[j]);
12025 target[i].op = (tmp >> 24) & 0xff;
12026 target[i].offset = tmp & 0xffffff;
12027 target[i].raw_data = be32_to_cpu(source[j+1]);
12028 }
12029}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012030
12031static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012032{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012033 const __be16 *source = (const __be16 *)_source;
12034 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012035 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012036
12037 for (i = 0; i < n/2; i++)
12038 target[i] = be16_to_cpu(source[i]);
12039}
12040
/* Allocate bp->arr and fill it from the firmware section described by
 * fw_hdr->arr, converting endianness with func().  Jumps to lbl for
 * cleanup if the allocation fails (fw_hdr and bp must be in scope). */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes " \
			       "for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)
12053
/* Request and parse the firmware file for this chip: validates it,
 * converts the init data/ops/offsets arrays to host endianness and
 * points the STORM pram/int-table pointers into the raw blob.
 * On failure everything allocated so far is unwound through the
 * labels below; returns 0 or a negative errno.
 */
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	/* E1 and E1H chips take different firmware images */
	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else
		fw_file_name = FW_FILE_NAME_E1H;

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	/* validate layout and version before trusting any offsets */
	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

	/* unwind in reverse order of allocation */
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
12122
12123
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012124static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12125 const struct pci_device_id *ent)
12126{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012127 struct net_device *dev = NULL;
12128 struct bnx2x *bp;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012129 int pcie_width, pcie_speed;
Eliezer Tamir25047952008-02-28 11:50:16 -080012130 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012131
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012132 /* dev zeroed in init_etherdev */
Eilon Greenstein555f6c72009-02-12 08:36:11 +000012133 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012134 if (!dev) {
12135 printk(KERN_ERR PFX "Cannot allocate net device\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012136 return -ENOMEM;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012137 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012138
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012139 bp = netdev_priv(dev);
12140 bp->msglevel = debug;
12141
Eilon Greensteindf4770de2009-08-12 08:23:28 +000012142 pci_set_drvdata(pdev, dev);
12143
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012144 rc = bnx2x_init_dev(pdev, dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012145 if (rc < 0) {
12146 free_netdev(dev);
12147 return rc;
12148 }
12149
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012150 rc = bnx2x_init_bp(bp);
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000012151 if (rc)
12152 goto init_one_exit;
12153
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012154 /* Set init arrays */
12155 rc = bnx2x_init_firmware(bp, &pdev->dev);
12156 if (rc) {
12157 printk(KERN_ERR PFX "Error loading firmware\n");
12158 goto init_one_exit;
12159 }
12160
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000012161 rc = register_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012162 if (rc) {
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000012163 dev_err(&pdev->dev, "Cannot register net device\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012164 goto init_one_exit;
12165 }
12166
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012167 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
Eliezer Tamir25047952008-02-28 11:50:16 -080012168 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
Eilon Greenstein87942b42009-02-12 08:36:49 +000012169 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012170 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012171 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
Eliezer Tamir25047952008-02-28 11:50:16 -080012172 dev->base_addr, bp->pdev->irq);
Johannes Berge1749612008-10-27 15:59:26 -070012173 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
Eilon Greensteinc0162012009-03-02 08:01:05 +000012174
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012175 return 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012176
12177init_one_exit:
12178 if (bp->regview)
12179 iounmap(bp->regview);
12180
12181 if (bp->doorbells)
12182 iounmap(bp->doorbells);
12183
12184 free_netdev(dev);
12185
12186 if (atomic_read(&pdev->enable_cnt) == 1)
12187 pci_release_regions(pdev);
12188
12189 pci_disable_device(pdev);
12190 pci_set_drvdata(pdev, NULL);
12191
12192 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012193}
12194
/* PCI remove callback: mirror image of bnx2x_init_one().  Unregisters
 * the netdev first so no new traffic arrives, then releases firmware
 * data, BAR mappings and the PCI device itself.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Free the init arrays allocated by bnx2x_init_firmware() and
	 * drop the firmware image reference.
	 */
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* Only the last user of the PCI device releases the BARs */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
12227
/* PM suspend callback: save PCI config space, quiesce the NIC if it is
 * running and drop to the requested low-power state.  All netdev state
 * changes happen under rtnl_lock.
 */
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	/* Interface is down - nothing more to quiesce */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	/* Counterpart of the LOAD_OPEN done in bnx2x_resume() */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
12258
/* PM resume callback: restore PCI config space and, if the interface
 * was running at suspend time, bring the NIC back up with LOAD_OPEN.
 */
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	/* Interface was down - nothing to reload */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
12289
/* Reduced unload path used by PCI error recovery: tears down driver
 * state (IRQs, timer, buffers, NAPI) without going through the full
 * bnx2x_nic_unload() sequence.  NOTE(review): presumably the device
 * may be inaccessible at this point, which is why no unload command is
 * sent to it - confirm against bnx2x_nic_unload().
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	/* Stop the periodic timer and statistics machinery */
	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* On E1, invalidate the multicast MAC configuration table */
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
			bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
12329
/* Re-establish contact with the management CPU (MCP) after a PCI
 * reset: re-read the shared-memory base, validate its signature and
 * resync the firmware mailbox sequence number.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* A shmem base outside [0xA0000, 0xC0000) is treated as "no
	 * functional MCP"; run without it from here on.
	 */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	/* Shared memory must carry both validity bits; warn otherwise */
	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	/* Resync the driver/MCP mailbox sequence number */
	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
12359
Wendy Xiong493adb12008-06-23 20:36:22 -070012360/**
12361 * bnx2x_io_error_detected - called when PCI error is detected
12362 * @pdev: Pointer to PCI device
12363 * @state: The current pci connection state
12364 *
12365 * This function is called after a PCI bus error affecting
12366 * this device has been detected.
12367 */
12368static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12369 pci_channel_state_t state)
12370{
12371 struct net_device *dev = pci_get_drvdata(pdev);
12372 struct bnx2x *bp = netdev_priv(dev);
12373
12374 rtnl_lock();
12375
12376 netif_device_detach(dev);
12377
Dean Nelson07ce50e2009-07-31 09:13:25 +000012378 if (state == pci_channel_io_perm_failure) {
12379 rtnl_unlock();
12380 return PCI_ERS_RESULT_DISCONNECT;
12381 }
12382
Wendy Xiong493adb12008-06-23 20:36:22 -070012383 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012384 bnx2x_eeh_nic_unload(bp);
Wendy Xiong493adb12008-06-23 20:36:22 -070012385
12386 pci_disable_device(pdev);
12387
12388 rtnl_unlock();
12389
12390 /* Request a slot reset */
12391 return PCI_ERS_RESULT_NEED_RESET;
12392}
12393
12394/**
12395 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12396 * @pdev: Pointer to PCI device
12397 *
12398 * Restart the card from scratch, as if from a cold-boot.
12399 */
12400static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12401{
12402 struct net_device *dev = pci_get_drvdata(pdev);
12403 struct bnx2x *bp = netdev_priv(dev);
12404
12405 rtnl_lock();
12406
12407 if (pci_enable_device(pdev)) {
12408 dev_err(&pdev->dev,
12409 "Cannot re-enable PCI device after reset\n");
12410 rtnl_unlock();
12411 return PCI_ERS_RESULT_DISCONNECT;
12412 }
12413
12414 pci_set_master(pdev);
12415 pci_restore_state(pdev);
12416
12417 if (netif_running(dev))
12418 bnx2x_set_power_state(bp, PCI_D0);
12419
12420 rtnl_unlock();
12421
12422 return PCI_ERS_RESULT_RECOVERED;
12423}
12424
12425/**
12426 * bnx2x_io_resume - called when traffic can start flowing again
12427 * @pdev: Pointer to PCI device
12428 *
12429 * This callback is called when the error recovery driver tells us that
12430 * its OK to resume normal operation.
12431 */
12432static void bnx2x_io_resume(struct pci_dev *pdev)
12433{
12434 struct net_device *dev = pci_get_drvdata(pdev);
12435 struct bnx2x *bp = netdev_priv(dev);
12436
12437 rtnl_lock();
12438
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012439 bnx2x_eeh_recover(bp);
12440
Wendy Xiong493adb12008-06-23 20:36:22 -070012441 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012442 bnx2x_nic_load(bp, LOAD_NORMAL);
Wendy Xiong493adb12008-06-23 20:36:22 -070012443
12444 netif_device_attach(dev);
12445
12446 rtnl_unlock();
12447}
12448
/* PCI error-recovery (EEH/AER) callbacks wired into bnx2x_pci_driver */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};
12454
/* PCI driver descriptor: probe/remove, power management and error
 * recovery entry points for all devices matched by bnx2x_pci_tbl.
 */
static struct pci_driver bnx2x_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2x_pci_tbl,
	.probe = bnx2x_init_one,
	.remove = __devexit_p(bnx2x_remove_one),
	.suspend = bnx2x_suspend,
	.resume = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
12464
12465static int __init bnx2x_init(void)
12466{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000012467 int ret;
12468
Eilon Greenstein938cf542009-08-12 08:23:37 +000012469 printk(KERN_INFO "%s", version);
12470
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080012471 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12472 if (bnx2x_wq == NULL) {
12473 printk(KERN_ERR PFX "Cannot create workqueue\n");
12474 return -ENOMEM;
12475 }
12476
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000012477 ret = pci_register_driver(&bnx2x_pci_driver);
12478 if (ret) {
12479 printk(KERN_ERR PFX "Cannot register driver\n");
12480 destroy_workqueue(bnx2x_wq);
12481 }
12482 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012483}
12484
12485static void __exit bnx2x_cleanup(void)
12486{
12487 pci_unregister_driver(&bnx2x_pci_driver);
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080012488
12489 destroy_workqueue(bnx2x_wq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012490}
12491
/* Module entry/exit points */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
12494
Michael Chan993ac7b2009-10-10 13:46:56 +000012495#ifdef BCM_CNIC
12496
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	/* Retire @count completed CNIC slowpath entries */
	bp->cnic_spq_pending -= count;

	/* Drain queued kwqes from the staging ring onto the SPQ while
	 * there is room (bounded by cnic_eth_dev.max_kwqe_pending).
	 */
	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	       bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* Advance the consumer, wrapping at the end of the ring */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	/* Publish the new SPQ producer value */
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
12532
/* Accept up to @count 16-byte kwqes from CNIC into the driver-side
 * staging ring; entries are drained onto the SPQ by
 * bnx2x_cnic_sp_post().  Returns the number of entries accepted
 * (may be less than @count when the ring fills), or -EIO on panic.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* Ring full - stop early; caller learns via return value */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		/* Advance the producer, wrapping at the end of the ring */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* Kick the drain path (count 0: no completions, just drain) */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
12575
12576static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12577{
12578 struct cnic_ops *c_ops;
12579 int rc = 0;
12580
12581 mutex_lock(&bp->cnic_mutex);
12582 c_ops = bp->cnic_ops;
12583 if (c_ops)
12584 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12585 mutex_unlock(&bp->cnic_mutex);
12586
12587 return rc;
12588}
12589
12590static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12591{
12592 struct cnic_ops *c_ops;
12593 int rc = 0;
12594
12595 rcu_read_lock();
12596 c_ops = rcu_dereference(bp->cnic_ops);
12597 if (c_ops)
12598 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12599 rcu_read_unlock();
12600
12601 return rc;
12602}
12603
12604/*
12605 * for commands that have no data
12606 */
12607static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12608{
12609 struct cnic_ctl_info ctl = {0};
12610
12611 ctl.cmd = cmd;
12612
12613 return bnx2x_cnic_ctl_send(bp, &ctl);
12614}
12615
/* Forward a completion for connection @cid to CNIC, then account for
 * it on the SPQ via bnx2x_cnic_sp_post(bp, 1).
 */
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;
	/* NOTE(review): the remaining fields of ctl are left
	 * uninitialized - presumably the receiver only reads cmd and
	 * data.comp.cid for this command; confirm against CNIC.
	 */

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}
12627
/* Control hook exported to CNIC through cnic_eth_dev.drv_ctl;
 * dispatches on ctl->cmd.  Returns 0 on success or -EINVAL for an
 * unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	/* Write one context-table (ILT) entry */
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	/* CNIC reports how many slowpath completions it consumed */
	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Include this client in the rx-mode client mask */
		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Remove this client from the rx-mode client mask */
		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
12674
/* Describe our interrupt resources to CNIC: irq_arr[0] carries the
 * CNIC status block (using msix_table[1] when MSI-X is active),
 * irq_arr[1] carries the default status block.
 */
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
12694
/* cnic_eth_dev.drv_register_cnic: called when the CNIC module attaches
 * to this device.  Allocates the kwqe staging ring, initializes the
 * CNIC status block, and publishes @ops last (via RCU) so readers only
 * ever see fully initialized state.
 * Returns 0, or -EINVAL/-EBUSY/-ENOMEM on failure.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* NOTE(review): intr_sem != 0 appears to mean interrupts are
	 * currently masked; registration is refused in that window.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	/* One page of zeroed kwqe staging entries */
	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* Empty ring: producer == consumer, wrap point at MAX_SP_DESC_CNT */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* Publish ops last; pairs with rcu_dereference() in the readers */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
12732
/* cnic_eth_dev.drv_unregister_cnic: detach the CNIC module.  The ops
 * pointer is cleared under cnic_mutex, and synchronize_rcu() ensures
 * no bnx2x_cnic_ctl_send_bh() reader still holds it before the kwqe
 * staging ring is freed.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	/* Undo the iSCSI MAC setup done at registration time */
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* Wait out all RCU readers before freeing the ring they may use */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
12752
/* Discovery entry point for the CNIC module (exported): fills the
 * per-device cnic_eth_dev descriptor with our resources (BAR mappings,
 * context-table geometry) and callbacks, and returns it.
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	/* Register and doorbell BAR mappings, shared with CNIC */
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	/* Context table geometry for bnx2x_drv_ctl(CTXTBL_WR) */
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
12776
12777#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012778