blob: ed785a30e98bbcadaa0de666d6763c0608f221fc [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Vladislav Zolotarov3359fce2010-02-17 13:35:01 -08003 * Copyright (c) 2007-2010 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080041#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070045#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020046#include <linux/workqueue.h>
47#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070048#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020049#include <linux/prefetch.h>
50#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020051#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000052#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020053
Eilon Greenstein359d8b12009-02-12 08:38:25 +000054
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020055#include "bnx2x.h"
56#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070057#include "bnx2x_init_ops.h"
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000058#include "bnx2x_dump.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020059
Vladislav Zolotarovc16cc0b2010-02-28 00:12:02 +000060#define DRV_MODULE_VERSION "1.52.1-7"
61#define DRV_MODULE_RELDATE "2010/02/28"
Eilon Greenstein34f80b02008-06-23 20:33:01 -070062#define BNX2X_BC_VER 0x040200
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020063
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070064#include <linux/firmware.h>
65#include "bnx2x_fw_file_hdr.h"
66/* FW files */
Ben Hutchings45229b42009-11-07 11:53:39 +000067#define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070074
Eilon Greenstein34f80b02008-06-23 20:33:01 -070075/* Time in jiffies before concluding the transmitter is hung */
76#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020077
/* Banner printed once at module load (driver name, version, release date). */
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

/* Module metadata and the firmware files requested via request_firmware() */
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020088
/* Module parameters (all read-only at runtime, perm 0). */

/* multi-queue operation: 0 = single queue, 1 = multiple queues (default) */
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

/* explicit queue count for multi_mode=1; 0 means "one per CPU" */
static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

/* non-zero disables TPA (HW LRO aggregation) */
static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

/* force interrupt mode instead of auto-selection: 1 = INT#x, 2 = MSI */
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

/* non-zero enables pause-frame generation when host rings run dry */
static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

/* debug aid: service the device by polling instead of interrupts */
static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

/* debug aid: force PCIe Max Read Request Size (0..3); -1 = leave as-is */
static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

/* initial debug message level (msglevel) */
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

/* driver-private workqueue for slowpath work items */
static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200126
/* Supported board variants; values double as indices into board_info[]
 * and as driver_data in the PCI ID table below.
 */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


/* PCI IDs this driver binds to; driver_data carries the board_type */
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
151
152/****************************************************************************
153* General service functions
154****************************************************************************/
155
/* used only at init
 * locking is done by mcp
 */
/* Indirect GRC register write through PCI config space: program the
 * target address into PCICFG_GRC_ADDRESS, write the value via
 * PCICFG_GRC_DATA, then park the address register back on the vendor-ID
 * offset so stray config reads don't hit device registers.
 * The three config writes must stay in exactly this order.
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
166
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200167static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
168{
169 u32 val;
170
171 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
172 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
173 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
174 PCICFG_VENDOR_ID_OFFSET);
175
176 return val;
177}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200178
/* "GO" doorbell registers for the 16 DMAE command cells, indexed by the
 * command-cell number passed to bnx2x_post_dmae().
 */
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
185
/* copy command into DMAE command memory and set DMAE command go */
/* @idx selects one of the 16 command cells; the command is copied
 * word-by-word into the cell, then the matching GO register is written.
 * The GO write must come last - it starts the engine on the cell.
 */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	/* byte offset of command cell idx inside DMAE command memory */
	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	/* kick the engine */
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
202
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700203void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
204 u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200205{
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000206 struct dmae_command dmae;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200207 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700208 int cnt = 200;
209
210 if (!bp->dmae_ready) {
211 u32 *data = bnx2x_sp(bp, wb_data[0]);
212
213 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
214 " using indirect\n", dst_addr, len32);
215 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
216 return;
217 }
218
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000219 memset(&dmae, 0, sizeof(struct dmae_command));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200220
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000221 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
222 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
223 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200224#ifdef __BIG_ENDIAN
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000225 DMAE_CMD_ENDIANITY_B_DW_SWAP |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200226#else
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000227 DMAE_CMD_ENDIANITY_DW_SWAP |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200228#endif
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000229 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
230 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
231 dmae.src_addr_lo = U64_LO(dma_addr);
232 dmae.src_addr_hi = U64_HI(dma_addr);
233 dmae.dst_addr_lo = dst_addr >> 2;
234 dmae.dst_addr_hi = 0;
235 dmae.len = len32;
236 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
237 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
238 dmae.comp_val = DMAE_COMP_VAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200239
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000240 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200241 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
242 "dst_addr [%x:%08x (%08x)]\n"
243 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000244 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
245 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
246 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700247 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200248 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
249 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200250
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000251 mutex_lock(&bp->dmae_mutex);
252
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200253 *wb_comp = 0;
254
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000255 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200256
257 udelay(5);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700258
259 while (*wb_comp != DMAE_COMP_VAL) {
260 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
261
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700262 if (!cnt) {
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000263 BNX2X_ERR("DMAE timeout!\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200264 break;
265 }
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700266 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -0700267 /* adjust delay for emulation/FPGA */
268 if (CHIP_REV_IS_SLOW(bp))
269 msleep(100);
270 else
271 udelay(5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200272 }
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700273
274 mutex_unlock(&bp->dmae_mutex);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200275}
276
Yaniv Rosnerc18487e2008-06-23 20:27:52 -0700277void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200278{
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000279 struct dmae_command dmae;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200280 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700281 int cnt = 200;
282
283 if (!bp->dmae_ready) {
284 u32 *data = bnx2x_sp(bp, wb_data[0]);
285 int i;
286
287 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
288 " using indirect\n", src_addr, len32);
289 for (i = 0; i < len32; i++)
290 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
291 return;
292 }
293
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000294 memset(&dmae, 0, sizeof(struct dmae_command));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200295
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000296 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
297 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
298 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200299#ifdef __BIG_ENDIAN
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000300 DMAE_CMD_ENDIANITY_B_DW_SWAP |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200301#else
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000302 DMAE_CMD_ENDIANITY_DW_SWAP |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200303#endif
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000304 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
305 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
306 dmae.src_addr_lo = src_addr >> 2;
307 dmae.src_addr_hi = 0;
308 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
309 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
310 dmae.len = len32;
311 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
312 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
313 dmae.comp_val = DMAE_COMP_VAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200314
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000315 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200316 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
317 "dst_addr [%x:%08x (%08x)]\n"
318 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000319 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
320 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
321 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200322
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000323 mutex_lock(&bp->dmae_mutex);
324
325 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200326 *wb_comp = 0;
327
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000328 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200329
330 udelay(5);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700331
332 while (*wb_comp != DMAE_COMP_VAL) {
333
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700334 if (!cnt) {
Eilon Greensteinc3eefaf2009-03-02 08:01:09 +0000335 BNX2X_ERR("DMAE timeout!\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200336 break;
337 }
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700338 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -0700339 /* adjust delay for emulation/FPGA */
340 if (CHIP_REV_IS_SLOW(bp))
341 msleep(100);
342 else
343 udelay(5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200344 }
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700345 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200346 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
347 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700348
349 mutex_unlock(&bp->dmae_mutex);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200350}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200351
Eilon Greenstein573f2032009-08-12 08:24:14 +0000352void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
353 u32 addr, u32 len)
354{
355 int offset = 0;
356
357 while (len > DMAE_LEN32_WR_MAX) {
358 bnx2x_write_dmae(bp, phys_addr + offset,
359 addr + offset, DMAE_LEN32_WR_MAX);
360 offset += DMAE_LEN32_WR_MAX * 4;
361 len -= DMAE_LEN32_WR_MAX;
362 }
363
364 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
365}
366
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700367/* used only for slowpath so not inlined */
368static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
369{
370 u32 wb_write[2];
371
372 wb_write[0] = val_hi;
373 wb_write[1] = val_lo;
374 REG_WR_DMAE(bp, reg, wb_write, 2);
375}
376
#ifdef USE_WB_RD
/* Read a 64-bit wide-bus register via DMAE and combine the two dwords
 * into one u64 (word 0 is the high half, word 1 the low half).
 */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 val[2];

	REG_RD_DMAE(bp, reg, val, 2);

	return HILO_U64(val[0], val[1]);
}
#endif
387
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200388static int bnx2x_mc_assert(struct bnx2x *bp)
389{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200390 char last_idx;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700391 int i, rc = 0;
392 u32 row0, row1, row2, row3;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200393
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700394 /* XSTORM */
395 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
396 XSTORM_ASSERT_LIST_INDEX_OFFSET);
397 if (last_idx)
398 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200399
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700400 /* print the asserts */
401 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200402
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700403 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
404 XSTORM_ASSERT_LIST_OFFSET(i));
405 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
407 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
409 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200411
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700412 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
413 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
414 " 0x%08x 0x%08x 0x%08x\n",
415 i, row3, row2, row1, row0);
416 rc++;
417 } else {
418 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200419 }
420 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700421
422 /* TSTORM */
423 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
424 TSTORM_ASSERT_LIST_INDEX_OFFSET);
425 if (last_idx)
426 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
427
428 /* print the asserts */
429 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
430
431 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
432 TSTORM_ASSERT_LIST_OFFSET(i));
433 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
435 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
437 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
439
440 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
441 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
442 " 0x%08x 0x%08x 0x%08x\n",
443 i, row3, row2, row1, row0);
444 rc++;
445 } else {
446 break;
447 }
448 }
449
450 /* CSTORM */
451 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
452 CSTORM_ASSERT_LIST_INDEX_OFFSET);
453 if (last_idx)
454 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
455
456 /* print the asserts */
457 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
458
459 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
460 CSTORM_ASSERT_LIST_OFFSET(i));
461 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
463 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
465 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
467
468 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
469 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
470 " 0x%08x 0x%08x 0x%08x\n",
471 i, row3, row2, row1, row0);
472 rc++;
473 } else {
474 break;
475 }
476 }
477
478 /* USTORM */
479 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
480 USTORM_ASSERT_LIST_INDEX_OFFSET);
481 if (last_idx)
482 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
483
484 /* print the asserts */
485 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
486
487 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
488 USTORM_ASSERT_LIST_OFFSET(i));
489 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i) + 4);
491 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
492 USTORM_ASSERT_LIST_OFFSET(i) + 8);
493 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
494 USTORM_ASSERT_LIST_OFFSET(i) + 12);
495
496 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
497 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
498 " 0x%08x 0x%08x 0x%08x\n",
499 i, row3, row2, row1, row0);
500 rc++;
501 } else {
502 break;
503 }
504 }
505
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200506 return rc;
507}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800508
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200509static void bnx2x_fw_dump(struct bnx2x *bp)
510{
511 u32 mark, offset;
Eilon Greenstein4781bfa2009-02-12 08:38:17 +0000512 __be32 data[9];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200513 int word;
514
515 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
Eliezer Tamir49d66772008-02-28 11:53:13 -0800516 mark = ((mark + 0x3) & ~0x3);
Joe Perches7995c642010-02-17 15:01:52 +0000517 pr_err("begin fw dump (mark 0x%x)\n", mark);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200518
Joe Perches7995c642010-02-17 15:01:52 +0000519 pr_err("");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200520 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
521 for (word = 0; word < 8; word++)
522 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
523 offset + 4*word));
524 data[8] = 0x0;
Joe Perches7995c642010-02-17 15:01:52 +0000525 pr_cont("%s", (char *)data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200526 }
527 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
528 for (word = 0; word < 8; word++)
529 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
530 offset + 4*word));
531 data[8] = 0x0;
Joe Perches7995c642010-02-17 15:01:52 +0000532 pr_cont("%s", (char *)data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200533 }
Joe Perches7995c642010-02-17 15:01:52 +0000534 pr_err("end of fw dump\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200535}
536
/* Emergency diagnostic dump: print driver indices, per-queue Rx/Tx state
 * and ring contents around the current consumer positions, then the MCP
 * firmware trace and the storm assert lists.  Statistics collection is
 * disabled first so the dump is not raced by the stats state machine.
 * Called on fatal driver errors (panic path); purely a logging function.
 */
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx: BD ring around the consumer, the SGE window, and the CQE
	 * ring around the completion consumer
	 */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx: sw packet ring and hw BD ring around the consumer */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
648
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800649static void bnx2x_int_enable(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200650{
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700651 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200652 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
653 u32 val = REG_RD(bp, addr);
654 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
Eilon Greenstein8badd272009-02-12 08:36:15 +0000655 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200656
657 if (msix) {
Eilon Greenstein8badd272009-02-12 08:36:15 +0000658 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
659 HC_CONFIG_0_REG_INT_LINE_EN_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200660 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
661 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
Eilon Greenstein8badd272009-02-12 08:36:15 +0000662 } else if (msi) {
663 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
664 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
665 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
666 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200667 } else {
668 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800669 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200670 HC_CONFIG_0_REG_INT_LINE_EN_0 |
671 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800672
Eilon Greenstein8badd272009-02-12 08:36:15 +0000673 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
674 val, port, addr);
Eliezer Tamir615f8fd2008-02-28 11:54:54 -0800675
676 REG_WR(bp, addr, val);
677
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200678 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
679 }
680
Eilon Greenstein8badd272009-02-12 08:36:15 +0000681 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
682 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200683
684 REG_WR(bp, addr, val);
Eilon Greenstein37dbbf32009-07-21 05:47:33 +0000685 /*
686 * Ensure that HC_CONFIG is written before leading/trailing edge config
687 */
688 mmiowb();
689 barrier();
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700690
691 if (CHIP_IS_E1H(bp)) {
692 /* init leading/trailing edge */
693 if (IS_E1HMF(bp)) {
Eilon Greenstein8badd272009-02-12 08:36:15 +0000694 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700695 if (bp->port.pmf)
Eilon Greenstein4acac6a2009-02-12 08:36:52 +0000696 /* enable nig and gpio3 attention */
697 val |= 0x1100;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700698 } else
699 val = 0xffff;
700
701 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
702 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
703 }
Eilon Greenstein37dbbf32009-07-21 05:47:33 +0000704
705 /* Make sure that interrupts are indeed enabled from here on */
706 mmiowb();
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200707}
708
/* Disable all interrupt sources (single-ISR, MSI/MSI-X, INTx line and
 * attention bits) in the per-port HC config register, then read back to
 * verify the write actually took effect in the IGU.
 */
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	/* read back to make sure the disable reached the chip */
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
730
/* Quiesce interrupt processing: bump intr_sem so ISRs back off, optionally
 * mask interrupts in HW (@disable_hw), wait for all in-flight ISRs on every
 * vector to finish, and drain the slowpath work item.
 */
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is the slowpath; CNIC (if built) takes one more,
		 * the fastpath queues follow at [offset..] */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
760
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700761/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200762
763/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700764 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200765 */
766
/* Acknowledge a status block to the IGU: write an igu_ack_register word
 * (status block id, storm, consumer index, interrupt mode op, update flag)
 * to the per-port HC command register, then fence so the ACK is posted
 * before the caller proceeds.
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
789
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000790static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200791{
792 struct host_status_block *fpsb = fp->status_blk;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200793
794 barrier(); /* status block is written to by the chip */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000795 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
796 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200797}
798
/* Read the SIMD interrupt mask word from the per-port HC command register;
 * the returned value tells the caller which interrupt sources fired.
 */
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
810
811
812/*
813 * fast path service functions
814 */
815
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -0800816static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
817{
818 /* Tell compiler that consumer and producer can change */
819 barrier();
820 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
Eilon Greenstein237907c2009-01-14 06:42:44 +0000821}
822
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 *
 * Walks the chain of buffer descriptors belonging to one transmitted
 * packet: unmaps the start BD, skips the parse BD (and the TSO split
 * header BD, which has no mapping), unmaps every fragment BD, and
 * finally frees the skb and clears the sw_tx_bd slot.
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	/* nbd counts the BDs after the start BD */
	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	/* consumer index of the first BD past this packet */
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
889
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700890static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200891{
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700892 s16 used;
893 u16 prod;
894 u16 cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200895
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700896 barrier(); /* Tell compiler that prod and cons can change */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200897 prod = fp->tx_bd_prod;
898 cons = fp->tx_bd_cons;
899
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700900 /* NUM_TX_RINGS = number of "next-page" entries
901 It will be used as a threshold */
902 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200903
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700904#ifdef BNX2X_STOP_ON_ERROR
Ilpo Järvinen53e5e962008-07-25 21:40:45 -0700905 WARN_ON(used < 0);
906 WARN_ON(used > fp->bp->tx_ring_size);
907 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700908#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200909
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700910 return (s16)(fp->bp->tx_ring_size) - used;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200911}
912
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000913static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
914{
915 u16 hw_cons;
916
917 /* Tell compiler that status block fields can change */
918 barrier();
919 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
920 return hw_cons != fp->tx_pkt_cons;
921}
922
923static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200924{
925 struct bnx2x *bp = fp->bp;
Eilon Greenstein555f6c72009-02-12 08:36:11 +0000926 struct netdev_queue *txq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200927 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200928
929#ifdef BNX2X_STOP_ON_ERROR
930 if (unlikely(bp->panic))
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000931 return -1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200932#endif
933
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000934 txq = netdev_get_tx_queue(bp->dev, fp->index);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200935 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
936 sw_cons = fp->tx_pkt_cons;
937
938 while (sw_cons != hw_cons) {
939 u16 pkt_cons;
940
941 pkt_cons = TX_BD(sw_cons);
942
943 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
944
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700945 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200946 hw_cons, sw_cons, pkt_cons);
947
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700948/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200949 rmb();
950 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
951 }
952*/
953 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
954 sw_cons++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200955 }
956
957 fp->tx_pkt_cons = sw_cons;
958 fp->tx_bd_cons = bd_cons;
959
Vladislav Zolotarovc16cc0b2010-02-28 00:12:02 +0000960 /* Need to make the tx_bd_cons update visible to start_xmit()
961 * before checking for netif_tx_queue_stopped(). Without the
962 * memory barrier, there is a small possibility that
963 * start_xmit() will miss it and cause the queue to be stopped
964 * forever.
965 */
966 smp_wmb();
967
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200968 /* TBD need a thresh? */
Eilon Greenstein555f6c72009-02-12 08:36:11 +0000969 if (unlikely(netif_tx_queue_stopped(txq))) {
Vladislav Zolotarovc16cc0b2010-02-28 00:12:02 +0000970 /* Taking tx_lock() is needed to prevent reenabling the queue
971 * while it's empty. This could have happen if rx_action() gets
972 * suspended in bnx2x_tx_int() after the condition before
973 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
974 *
975 * stops the queue->sees fresh tx_bd_cons->releases the queue->
976 * sends some packets consuming the whole queue again->
977 * stops the queue
Eilon Greenstein60447352009-03-02 07:59:24 +0000978 */
Vladislav Zolotarovc16cc0b2010-02-28 00:12:02 +0000979
980 __netif_tx_lock(txq, smp_processor_id());
Eilon Greenstein60447352009-03-02 07:59:24 +0000981
Eilon Greenstein555f6c72009-02-12 08:36:11 +0000982 if ((netif_tx_queue_stopped(txq)) &&
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -0700983 (bp->state == BNX2X_STATE_OPEN) &&
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200984 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
Eilon Greenstein555f6c72009-02-12 08:36:11 +0000985 netif_tx_wake_queue(txq);
Vladislav Zolotarovc16cc0b2010-02-28 00:12:02 +0000986
987 __netif_tx_unlock(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200988 }
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +0000989 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200990}
991
Michael Chan993ac7b2009-10-10 13:46:56 +0000992#ifdef BCM_CNIC
993static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
994#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -0700995
/* Handle a slowpath (ramrod) completion CQE received on a fastpath ring.
 *
 * Decodes the connection id and command, returns a slot to the slowpath
 * queue budget (spq_left), and advances the relevant state machine:
 * per-queue fp->state for non-leading queues (fp->index != 0), the global
 * bp->state machine otherwise.  The mb() calls force the state change to
 * be visible to bnx2x_wait_ramrod() pollers.
 */
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		/* non-leading queue: drive the per-fastpath state machine */
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	/* leading queue: drive the global device state machine */
	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		/* pairs with readers polling set_mac_pending */
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
1078
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001079static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1080 struct bnx2x_fastpath *fp, u16 index)
1081{
1082 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1083 struct page *page = sw_buf->page;
1084 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1085
1086 /* Skip "next page" elements */
1087 if (!page)
1088 return;
1089
1090 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001091 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001092 __free_pages(page, PAGES_PER_SGE_SHIFT);
1093
1094 sw_buf->page = NULL;
1095 sge->addr_hi = 0;
1096 sge->addr_lo = 0;
1097}
1098
1099static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1100 struct bnx2x_fastpath *fp, int last)
1101{
1102 int i;
1103
1104 for (i = 0; i < last; i++)
1105 bnx2x_free_rx_sge(bp, fp, i);
1106}
1107
1108static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1109 struct bnx2x_fastpath *fp, u16 index)
1110{
1111 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1112 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1113 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1114 dma_addr_t mapping;
1115
1116 if (unlikely(page == NULL))
1117 return -ENOMEM;
1118
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001119 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001120 PCI_DMA_FROMDEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001121 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001122 __free_pages(page, PAGES_PER_SGE_SHIFT);
1123 return -ENOMEM;
1124 }
1125
1126 sw_buf->page = page;
1127 pci_unmap_addr_set(sw_buf, mapping, mapping);
1128
1129 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1130 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1131
1132 return 0;
1133}
1134
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001135static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1136 struct bnx2x_fastpath *fp, u16 index)
1137{
1138 struct sk_buff *skb;
1139 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1140 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1141 dma_addr_t mapping;
1142
1143 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1144 if (unlikely(skb == NULL))
1145 return -ENOMEM;
1146
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07001147 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001148 PCI_DMA_FROMDEVICE);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -07001149 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001150 dev_kfree_skb(skb);
1151 return -ENOMEM;
1152 }
1153
1154 rx_buf->skb = skb;
1155 pci_unmap_addr_set(rx_buf, mapping, mapping);
1156
1157 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1158 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1159
1160 return 0;
1161}
1162
1163/* note that we are not allocating a new skb,
1164 * we are just moving one from cons to prod
1165 * we are not creating a new mapping,
1166 * so there is no need to check for dma_mapping_error().
1167 */
1168static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1169 struct sk_buff *skb, u16 cons, u16 prod)
1170{
1171 struct bnx2x *bp = fp->bp;
1172 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1173 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1174 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1175 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1176
1177 pci_dma_sync_single_for_device(bp->pdev,
1178 pci_unmap_addr(cons_rx_buf, mapping),
Eilon Greenstein87942b42009-02-12 08:36:49 +00001179 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001180
1181 prod_rx_buf->skb = cons_rx_buf->skb;
1182 pci_unmap_addr_set(prod_rx_buf, mapping,
1183 pci_unmap_addr(cons_rx_buf, mapping));
1184 *prod_bd = *cons_bd;
1185}
1186
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001187static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1188 u16 idx)
1189{
1190 u16 last_max = fp->last_max_sge;
1191
1192 if (SUB_S16(idx, last_max) > 0)
1193 fp->last_max_sge = idx;
1194}
1195
1196static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1197{
1198 int i, j;
1199
1200 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1201 int idx = RX_SGE_CNT * i - 1;
1202
1203 for (j = 0; j < 2; j++) {
1204 SGE_MASK_CLEAR_BIT(fp, idx);
1205 idx--;
1206 }
1207 }
1208}
1209
1210static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1211 struct eth_fast_path_rx_cqe *fp_cqe)
1212{
1213 struct bnx2x *bp = fp->bp;
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001214 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001215 le16_to_cpu(fp_cqe->len_on_bd)) >>
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001216 SGE_PAGE_SHIFT;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001217 u16 last_max, last_elem, first_elem;
1218 u16 delta = 0;
1219 u16 i;
1220
1221 if (!sge_len)
1222 return;
1223
1224 /* First mark all used pages */
1225 for (i = 0; i < sge_len; i++)
1226 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1227
1228 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1229 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1230
1231 /* Here we assume that the last SGE index is the biggest */
1232 prefetch((void *)(fp->sge_mask));
1233 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1234
1235 last_max = RX_SGE(fp->last_max_sge);
1236 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1237 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1238
1239 /* If ring is not full */
1240 if (last_elem + 1 != first_elem)
1241 last_elem++;
1242
1243 /* Now update the prod */
1244 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1245 if (likely(fp->sge_mask[i]))
1246 break;
1247
1248 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1249 delta += RX_SGE_MASK_ELEM_SZ;
1250 }
1251
1252 if (delta > 0) {
1253 fp->rx_sge_prod += delta;
1254 /* clear page-end entries */
1255 bnx2x_clear_sge_mask_next_elems(fp);
1256 }
1257
1258 DP(NETIF_MSG_RX_STATUS,
1259 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1260 fp->last_max_sge, fp->rx_sge_prod);
1261}
1262
1263static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1264{
1265 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1266 memset(fp->sge_mask, 0xff,
1267 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1268
Eilon Greenstein33471622008-08-13 15:59:08 -07001269 /* Clear the two last indices in the page to 1:
1270 these are the indices that correspond to the "next" element,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001271 hence will never be indicated and should be removed from
1272 the calculations. */
1273 bnx2x_clear_sge_mask_next_elems(fp);
1274}
1275
1276static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1277 struct sk_buff *skb, u16 cons, u16 prod)
1278{
1279 struct bnx2x *bp = fp->bp;
1280 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1281 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1282 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1283 dma_addr_t mapping;
1284
1285 /* move empty skb from pool to prod and map it */
1286 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1287 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07001288 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001289 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1290
1291 /* move partial skb from cons to pool (don't unmap yet) */
1292 fp->tpa_pool[queue] = *cons_rx_buf;
1293
1294 /* mark bin state as start - print error if current state != stop */
1295 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1296 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1297
1298 fp->tpa_state[queue] = BNX2X_TPA_START;
1299
1300 /* point prod_bd to new skb */
1301 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1302 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1303
1304#ifdef BNX2X_STOP_ON_ERROR
1305 fp->tpa_queue_used |= (1 << queue);
1306#ifdef __powerpc64__
1307 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1308#else
1309 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1310#endif
1311 fp->tpa_queue_used);
1312#endif
1313}
1314
1315static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1316 struct sk_buff *skb,
1317 struct eth_fast_path_rx_cqe *fp_cqe,
1318 u16 cqe_idx)
1319{
1320 struct sw_rx_page *rx_pg, old_rx_pg;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001321 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1322 u32 i, frag_len, frag_size, pages;
1323 int err;
1324 int j;
1325
1326 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001327 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001328
1329 /* This is needed in order to enable forwarding support */
1330 if (frag_size)
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001331 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001332 max(frag_size, (u32)len_on_bd));
1333
1334#ifdef BNX2X_STOP_ON_ERROR
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001335 if (pages >
1336 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001337 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1338 pages, cqe_idx);
1339 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1340 fp_cqe->pkt_len, len_on_bd);
1341 bnx2x_panic();
1342 return -EINVAL;
1343 }
1344#endif
1345
1346 /* Run through the SGL and compose the fragmented skb */
1347 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1348 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1349
1350 /* FW gives the indices of the SGE as if the ring is an array
1351 (meaning that "next" element will consume 2 indices) */
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001352 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001353 rx_pg = &fp->rx_page_ring[sge_idx];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001354 old_rx_pg = *rx_pg;
1355
1356 /* If we fail to allocate a substitute page, we simply stop
1357 where we are and drop the whole packet */
1358 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1359 if (unlikely(err)) {
Eilon Greensteinde832a52009-02-12 08:36:33 +00001360 fp->eth_q_stats.rx_skb_alloc_failed++;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001361 return err;
1362 }
1363
1364 /* Unmap the page as we r going to pass it to the stack */
1365 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
Eilon Greenstein4f40f2c2009-01-14 21:24:17 -08001366 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001367
1368 /* Add one frag and update the appropriate fields in the skb */
1369 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1370
1371 skb->data_len += frag_len;
1372 skb->truesize += frag_len;
1373 skb->len += frag_len;
1374
1375 frag_size -= frag_len;
1376 }
1377
1378 return 0;
1379}
1380
1381static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1382 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1383 u16 cqe_idx)
1384{
1385 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1386 struct sk_buff *skb = rx_buf->skb;
1387 /* alloc new skb */
1388 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1389
1390 /* Unmap skb in the pool anyway, as we are going to change
1391 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1392 fails. */
1393 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07001394 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001395
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001396 if (likely(new_skb)) {
Yitchak Gertner66e855f2008-08-13 15:49:05 -07001397 /* fix ip xsum and give it to the stack */
1398 /* (no need to map the new skb) */
Eilon Greenstein0c6671b2009-01-14 21:26:51 -08001399#ifdef BCM_VLAN
1400 int is_vlan_cqe =
1401 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1402 PARSING_FLAGS_VLAN);
1403 int is_not_hwaccel_vlan_cqe =
1404 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1405#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001406
1407 prefetch(skb);
1408 prefetch(((char *)(skb)) + 128);
1409
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001410#ifdef BNX2X_STOP_ON_ERROR
1411 if (pad + len > bp->rx_buf_size) {
1412 BNX2X_ERR("skb_put is about to fail... "
1413 "pad %d len %d rx_buf_size %d\n",
1414 pad, len, bp->rx_buf_size);
1415 bnx2x_panic();
1416 return;
1417 }
1418#endif
1419
1420 skb_reserve(skb, pad);
1421 skb_put(skb, len);
1422
1423 skb->protocol = eth_type_trans(skb, bp->dev);
1424 skb->ip_summed = CHECKSUM_UNNECESSARY;
1425
1426 {
1427 struct iphdr *iph;
1428
1429 iph = (struct iphdr *)skb->data;
Eilon Greenstein0c6671b2009-01-14 21:26:51 -08001430#ifdef BCM_VLAN
1431 /* If there is no Rx VLAN offloading -
1432 take VLAN tag into an account */
1433 if (unlikely(is_not_hwaccel_vlan_cqe))
1434 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1435#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001436 iph->check = 0;
1437 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1438 }
1439
1440 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1441 &cqe->fast_path_cqe, cqe_idx)) {
1442#ifdef BCM_VLAN
Eilon Greenstein0c6671b2009-01-14 21:26:51 -08001443 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1444 (!is_not_hwaccel_vlan_cqe))
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001445 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1446 le16_to_cpu(cqe->fast_path_cqe.
1447 vlan_tag));
1448 else
1449#endif
1450 netif_receive_skb(skb);
1451 } else {
1452 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1453 " - dropping packet!\n");
1454 dev_kfree_skb(skb);
1455 }
1456
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001457
1458 /* put new skb in bin */
1459 fp->tpa_pool[queue].skb = new_skb;
1460
1461 } else {
Yitchak Gertner66e855f2008-08-13 15:49:05 -07001462 /* else drop the packet and keep the buffer in the bin */
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001463 DP(NETIF_MSG_RX_STATUS,
1464 "Failed to allocate new skb - dropping packet!\n");
Eilon Greensteinde832a52009-02-12 08:36:33 +00001465 fp->eth_q_stats.rx_skb_alloc_failed++;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001466 }
1467
1468 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1469}
1470
/*
 * bnx2x_update_rx_prod - publish new BD, CQE and SGE producer indices to
 * the FW (ustorm) for this fastpath queue.
 *
 * The wmb() before the register writes and the mmiowb() after them are
 * both required for ordering; see the inline comment below. Do not
 * reorder the stores relative to the barriers.
 */
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	/* copy the producers structure to the FW memory word by word */
	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
1505
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001506static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1507{
1508 struct bnx2x *bp = fp->bp;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001509 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001510 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1511 int rx_pkt = 0;
1512
1513#ifdef BNX2X_STOP_ON_ERROR
1514 if (unlikely(bp->panic))
1515 return 0;
1516#endif
1517
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001518 /* CQ "next element" is of the size of the regular element,
1519 that's why it's ok here */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001520 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1521 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1522 hw_comp_cons++;
1523
1524 bd_cons = fp->rx_bd_cons;
1525 bd_prod = fp->rx_bd_prod;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001526 bd_prod_fw = bd_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001527 sw_comp_cons = fp->rx_comp_cons;
1528 sw_comp_prod = fp->rx_comp_prod;
1529
1530 /* Memory barrier necessary as speculative reads of the rx
1531 * buffer can be ahead of the index in the status block
1532 */
1533 rmb();
1534
1535 DP(NETIF_MSG_RX_STATUS,
1536 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
Eilon Greenstein0626b892009-02-12 08:38:14 +00001537 fp->index, hw_comp_cons, sw_comp_cons);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001538
1539 while (sw_comp_cons != hw_comp_cons) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001540 struct sw_rx_bd *rx_buf = NULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001541 struct sk_buff *skb;
1542 union eth_rx_cqe *cqe;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001543 u8 cqe_fp_flags;
1544 u16 len, pad;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001545
1546 comp_ring_cons = RCQ_BD(sw_comp_cons);
1547 bd_prod = RX_BD(bd_prod);
1548 bd_cons = RX_BD(bd_cons);
1549
Eilon Greenstein619e7a62009-08-12 08:23:20 +00001550 /* Prefetch the page containing the BD descriptor
1551 at producer's index. It will be needed when new skb is
1552 allocated */
1553 prefetch((void *)(PAGE_ALIGN((unsigned long)
1554 (&fp->rx_desc_ring[bd_prod])) -
1555 PAGE_SIZE + 1));
1556
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001557 cqe = &fp->rx_comp_ring[comp_ring_cons];
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001558 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001559
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001560 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001561 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1562 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
Eilon Greenstein68d59482009-01-14 21:27:36 -08001563 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001564 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1565 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001566
1567 /* is this a slowpath msg? */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001568 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001569 bnx2x_sp_event(fp, cqe);
1570 goto next_cqe;
1571
1572 /* this is an rx packet */
1573 } else {
1574 rx_buf = &fp->rx_buf_ring[bd_cons];
1575 skb = rx_buf->skb;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001576 prefetch(skb);
1577 prefetch((u8 *)skb + 256);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001578 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1579 pad = cqe->fast_path_cqe.placement_offset;
1580
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001581 /* If CQE is marked both TPA_START and TPA_END
1582 it is a non-TPA CQE */
1583 if ((!fp->disable_tpa) &&
1584 (TPA_TYPE(cqe_fp_flags) !=
1585 (TPA_TYPE_START | TPA_TYPE_END))) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07001586 u16 queue = cqe->fast_path_cqe.queue_index;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001587
1588 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1589 DP(NETIF_MSG_RX_STATUS,
1590 "calling tpa_start on queue %d\n",
1591 queue);
1592
1593 bnx2x_tpa_start(fp, queue, skb,
1594 bd_cons, bd_prod);
1595 goto next_rx;
1596 }
1597
1598 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1599 DP(NETIF_MSG_RX_STATUS,
1600 "calling tpa_stop on queue %d\n",
1601 queue);
1602
1603 if (!BNX2X_RX_SUM_FIX(cqe))
1604 BNX2X_ERR("STOP on none TCP "
1605 "data\n");
1606
1607 /* This is a size of the linear data
1608 on this skb */
1609 len = le16_to_cpu(cqe->fast_path_cqe.
1610 len_on_bd);
1611 bnx2x_tpa_stop(bp, fp, queue, pad,
1612 len, cqe, comp_ring_cons);
1613#ifdef BNX2X_STOP_ON_ERROR
1614 if (bp->panic)
Stanislaw Gruszka17cb40062009-05-05 23:22:12 +00001615 return 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001616#endif
1617
1618 bnx2x_update_sge_prod(fp,
1619 &cqe->fast_path_cqe);
1620 goto next_cqe;
1621 }
1622 }
1623
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001624 pci_dma_sync_single_for_device(bp->pdev,
1625 pci_unmap_addr(rx_buf, mapping),
1626 pad + RX_COPY_THRESH,
1627 PCI_DMA_FROMDEVICE);
1628 prefetch(skb);
1629 prefetch(((char *)(skb)) + 128);
1630
1631 /* is this an error packet? */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001632 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001633 DP(NETIF_MSG_RX_ERR,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001634 "ERROR flags %x rx packet %u\n",
1635 cqe_fp_flags, sw_comp_cons);
Eilon Greensteinde832a52009-02-12 08:36:33 +00001636 fp->eth_q_stats.rx_err_discard_pkt++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001637 goto reuse_rx;
1638 }
1639
1640 /* Since we don't have a jumbo ring
1641 * copy small packets if mtu > 1500
1642 */
1643 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1644 (len <= RX_COPY_THRESH)) {
1645 struct sk_buff *new_skb;
1646
1647 new_skb = netdev_alloc_skb(bp->dev,
1648 len + pad);
1649 if (new_skb == NULL) {
1650 DP(NETIF_MSG_RX_ERR,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001651 "ERROR packet dropped "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001652 "because of alloc failure\n");
Eilon Greensteinde832a52009-02-12 08:36:33 +00001653 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001654 goto reuse_rx;
1655 }
1656
1657 /* aligned copy */
1658 skb_copy_from_linear_data_offset(skb, pad,
1659 new_skb->data + pad, len);
1660 skb_reserve(new_skb, pad);
1661 skb_put(new_skb, len);
1662
1663 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1664
1665 skb = new_skb;
1666
Eilon Greensteina119a062009-08-12 08:23:23 +00001667 } else
1668 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001669 pci_unmap_single(bp->pdev,
1670 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein437cf2f2008-09-03 14:38:00 -07001671 bp->rx_buf_size,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001672 PCI_DMA_FROMDEVICE);
1673 skb_reserve(skb, pad);
1674 skb_put(skb, len);
1675
1676 } else {
1677 DP(NETIF_MSG_RX_ERR,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001678 "ERROR packet dropped because "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001679 "of alloc failure\n");
Eilon Greensteinde832a52009-02-12 08:36:33 +00001680 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001681reuse_rx:
1682 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1683 goto next_rx;
1684 }
1685
1686 skb->protocol = eth_type_trans(skb, bp->dev);
1687
1688 skb->ip_summed = CHECKSUM_NONE;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07001689 if (bp->rx_csum) {
Eilon Greenstein1adcd8b2008-08-13 15:48:29 -07001690 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1691 skb->ip_summed = CHECKSUM_UNNECESSARY;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07001692 else
Eilon Greensteinde832a52009-02-12 08:36:33 +00001693 fp->eth_q_stats.hw_csum_err++;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07001694 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001695 }
1696
Eilon Greenstein748e5432009-02-12 08:36:37 +00001697 skb_record_rx_queue(skb, fp->index);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00001698
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001699#ifdef BCM_VLAN
Eilon Greenstein0c6671b2009-01-14 21:26:51 -08001700 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001701 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1702 PARSING_FLAGS_VLAN))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001703 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1704 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1705 else
1706#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001707 netif_receive_skb(skb);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001708
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001709
1710next_rx:
1711 rx_buf->skb = NULL;
1712
1713 bd_cons = NEXT_RX_IDX(bd_cons);
1714 bd_prod = NEXT_RX_IDX(bd_prod);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001715 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1716 rx_pkt++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001717next_cqe:
1718 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1719 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001720
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001721 if (rx_pkt == budget)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001722 break;
1723 } /* while */
1724
1725 fp->rx_bd_cons = bd_cons;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001726 fp->rx_bd_prod = bd_prod_fw;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001727 fp->rx_comp_cons = sw_comp_cons;
1728 fp->rx_comp_prod = sw_comp_prod;
1729
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001730 /* Update producers */
1731 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1732 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001733
1734 fp->rx_pkt += rx_pkt;
1735 fp->rx_calls++;
1736
1737 return rx_pkt;
1738}
1739
/*
 * bnx2x_msix_fp_int - MSI-X interrupt handler for a single fastpath queue.
 *
 * Acks the status block (disabling further IGU interrupts for this SB)
 * and schedules the queue's NAPI context, which does the actual RX/TX
 * work. @fp_cookie is the fastpath registered with request_irq().
 */
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	/* ack and mask the SB; NAPI poll will re-enable it when done */
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->u_status_block.status_block_index);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
1769
/*
 * bnx2x_interrupt - INTx/MSI (single vector) interrupt handler.
 *
 * Reads the aggregated interrupt status and dispatches it: each set
 * fastpath bit schedules that queue's NAPI, the CNIC bit (when built in)
 * is forwarded to the registered CNIC handler, and bit 0 queues the
 * slowpath task. Returns IRQ_NONE when the (shared) interrupt was not
 * ours.
 */
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* each fastpath owns status bit (0x2 << sb_id) */
	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		/* CNIC ops pointer is RCU protected against unregister */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* bit 0 indicates slowpath work - handled in process context */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
1841
1842/* end of fast path */
1843
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001844static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001845
1846/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001847
1848/*
1849 * General service functions
1850 */
1851
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001852static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001853{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001854 u32 lock_status;
1855 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001856 int func = BP_FUNC(bp);
1857 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001858 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001859
1860 /* Validating that the resource is within range */
1861 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1862 DP(NETIF_MSG_HW,
1863 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1864 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1865 return -EINVAL;
1866 }
1867
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001868 if (func <= 5) {
1869 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1870 } else {
1871 hw_lock_control_reg =
1872 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1873 }
1874
Eliezer Tamirf1410642008-02-28 11:51:50 -08001875 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001876 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001877 if (lock_status & resource_bit) {
1878 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1879 lock_status, resource_bit);
1880 return -EEXIST;
1881 }
1882
Eilon Greenstein46230472008-08-25 15:23:30 -07001883 /* Try for 5 second every 5ms */
1884 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001885 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001886 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1887 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001888 if (lock_status & resource_bit)
1889 return 0;
1890
1891 msleep(5);
1892 }
1893 DP(NETIF_MSG_HW, "Timeout\n");
1894 return -EAGAIN;
1895}
1896
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001897static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001898{
1899 u32 lock_status;
1900 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001901 int func = BP_FUNC(bp);
1902 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001903
1904 /* Validating that the resource is within range */
1905 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1906 DP(NETIF_MSG_HW,
1907 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1908 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1909 return -EINVAL;
1910 }
1911
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001912 if (func <= 5) {
1913 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1914 } else {
1915 hw_lock_control_reg =
1916 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1917 }
1918
Eliezer Tamirf1410642008-02-28 11:51:50 -08001919 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001920 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001921 if (!(lock_status & resource_bit)) {
1922 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1923 lock_status, resource_bit);
1924 return -EFAULT;
1925 }
1926
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001927 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001928 return 0;
1929}
1930
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001931/* HW Lock for shared dual port PHYs */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001932static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001933{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001934 mutex_lock(&bp->port.phy_mutex);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001935
Eilon Greenstein46c6a672009-02-12 08:36:58 +00001936 if (bp->port.need_hw_lock)
1937 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001938}
1939
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001940static void bnx2x_release_phy_lock(struct bnx2x *bp)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001941{
Eilon Greenstein46c6a672009-02-12 08:36:58 +00001942 if (bp->port.need_hw_lock)
1943 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001944
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001945 mutex_unlock(&bp->port.phy_mutex);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001946}
1947
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001948int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1949{
1950 /* The GPIO should be swapped if swap register is set and active */
1951 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1952 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1953 int gpio_shift = gpio_num +
1954 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1955 u32 gpio_mask = (1 << gpio_shift);
1956 u32 gpio_reg;
1957 int value;
1958
1959 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1960 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1961 return -EINVAL;
1962 }
1963
1964 /* read GPIO value */
1965 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1966
1967 /* get the requested pin value */
1968 if ((gpio_reg & gpio_mask) == gpio_mask)
1969 value = 1;
1970 else
1971 value = 0;
1972
1973 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1974
1975 return value;
1976}
1977
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001978int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001979{
1980 /* The GPIO should be swapped if swap register is set and active */
1981 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001982 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001983 int gpio_shift = gpio_num +
1984 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1985 u32 gpio_mask = (1 << gpio_shift);
1986 u32 gpio_reg;
1987
1988 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1989 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1990 return -EINVAL;
1991 }
1992
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001993 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001994 /* read GPIO and mask except the float bits */
1995 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1996
1997 switch (mode) {
1998 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1999 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2000 gpio_num, gpio_shift);
2001 /* clear FLOAT and set CLR */
2002 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2003 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2004 break;
2005
2006 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2007 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2008 gpio_num, gpio_shift);
2009 /* clear FLOAT and set SET */
2010 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2011 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2012 break;
2013
Eilon Greenstein17de50b2008-08-13 15:56:59 -07002014 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002015 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2016 gpio_num, gpio_shift);
2017 /* set FLOAT */
2018 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2019 break;
2020
2021 default:
2022 break;
2023 }
2024
2025 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002026 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002027
2028 return 0;
2029}
2030
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00002031int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2032{
2033 /* The GPIO should be swapped if swap register is set and active */
2034 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2035 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2036 int gpio_shift = gpio_num +
2037 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2038 u32 gpio_mask = (1 << gpio_shift);
2039 u32 gpio_reg;
2040
2041 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2042 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2043 return -EINVAL;
2044 }
2045
2046 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2047 /* read GPIO int */
2048 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2049
2050 switch (mode) {
2051 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2052 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2053 "output low\n", gpio_num, gpio_shift);
2054 /* clear SET and set CLR */
2055 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2056 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2057 break;
2058
2059 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2060 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2061 "output high\n", gpio_num, gpio_shift);
2062 /* clear CLR and set SET */
2063 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2064 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2065 break;
2066
2067 default:
2068 break;
2069 }
2070
2071 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2072 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2073
2074 return 0;
2075}
2076
Eliezer Tamirf1410642008-02-28 11:51:50 -08002077static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2078{
2079 u32 spio_mask = (1 << spio_num);
2080 u32 spio_reg;
2081
2082 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2083 (spio_num > MISC_REGISTERS_SPIO_7)) {
2084 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2085 return -EINVAL;
2086 }
2087
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002088 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002089 /* read SPIO and mask except the float bits */
2090 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2091
2092 switch (mode) {
Eilon Greenstein6378c022008-08-13 15:59:25 -07002093 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002094 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2095 /* clear FLOAT and set CLR */
2096 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2097 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2098 break;
2099
Eilon Greenstein6378c022008-08-13 15:59:25 -07002100 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002101 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2102 /* clear FLOAT and set SET */
2103 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2104 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2105 break;
2106
2107 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2108 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2109 /* set FLOAT */
2110 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2111 break;
2112
2113 default:
2114 break;
2115 }
2116
2117 REG_WR(bp, MISC_REG_SPIO, spio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002118 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002119
2120 return 0;
2121}
2122
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002123static void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002124{
Eilon Greensteinad33ea32009-01-14 21:24:57 -08002125 switch (bp->link_vars.ieee_fc &
2126 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002127 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002128 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002129 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002130 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002131
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002132 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002133 bp->port.advertising |= (ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002134 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002135 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002136
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002137 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002138 bp->port.advertising |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08002139 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002140
Eliezer Tamirf1410642008-02-28 11:51:50 -08002141 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002142 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002143 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002144 break;
2145 }
2146}
2147
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002148static void bnx2x_link_report(struct bnx2x *bp)
2149{
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002150 if (bp->flags & MF_FUNC_DIS) {
Eilon Greenstein2691d512009-08-12 08:22:08 +00002151 netif_carrier_off(bp->dev);
Joe Perches7995c642010-02-17 15:01:52 +00002152 netdev_err(bp->dev, "NIC Link is Down\n");
Eilon Greenstein2691d512009-08-12 08:22:08 +00002153 return;
2154 }
2155
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002156 if (bp->link_vars.link_up) {
Eilon Greenstein35c5f8f2009-10-15 00:19:05 -07002157 u16 line_speed;
2158
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002159 if (bp->state == BNX2X_STATE_OPEN)
2160 netif_carrier_on(bp->dev);
Joe Perches7995c642010-02-17 15:01:52 +00002161 netdev_info(bp->dev, "NIC Link is Up, ");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002162
Eilon Greenstein35c5f8f2009-10-15 00:19:05 -07002163 line_speed = bp->link_vars.line_speed;
2164 if (IS_E1HMF(bp)) {
2165 u16 vn_max_rate;
2166
2167 vn_max_rate =
2168 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2169 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2170 if (vn_max_rate < line_speed)
2171 line_speed = vn_max_rate;
2172 }
Joe Perches7995c642010-02-17 15:01:52 +00002173 pr_cont("%d Mbps ", line_speed);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002174
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002175 if (bp->link_vars.duplex == DUPLEX_FULL)
Joe Perches7995c642010-02-17 15:01:52 +00002176 pr_cont("full duplex");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002177 else
Joe Perches7995c642010-02-17 15:01:52 +00002178 pr_cont("half duplex");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002179
David S. Millerc0700f92008-12-16 23:53:20 -08002180 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2181 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
Joe Perches7995c642010-02-17 15:01:52 +00002182 pr_cont(", receive ");
Eilon Greenstein356e2382009-02-12 08:38:32 +00002183 if (bp->link_vars.flow_ctrl &
2184 BNX2X_FLOW_CTRL_TX)
Joe Perches7995c642010-02-17 15:01:52 +00002185 pr_cont("& transmit ");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002186 } else {
Joe Perches7995c642010-02-17 15:01:52 +00002187 pr_cont(", transmit ");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002188 }
Joe Perches7995c642010-02-17 15:01:52 +00002189 pr_cont("flow control ON");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002190 }
Joe Perches7995c642010-02-17 15:01:52 +00002191 pr_cont("\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002192
2193 } else { /* link_down */
2194 netif_carrier_off(bp->dev);
Joe Perches7995c642010-02-17 15:01:52 +00002195 netdev_err(bp->dev, "NIC Link is Down\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002196 }
2197}
2198
/* Bring up the PHY/link for the first time after load.
 *
 * @load_mode: LOAD_DIAG selects XGXS loopback for diagnostics.
 *
 * Returns the bnx2x_phy_init() result, or -EINVAL when no bootcode
 * (MCP) is present, in which case the link cannot be initialized.
 */
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		/* phy access is serialized between functions via this lock */
		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		/* on emulation/FPGA the link may already be up here;
		 * report it since no link interrupt will arrive */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
2233
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002234static void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002235{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002236 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002237 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002238 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002239 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002240
Eilon Greenstein19680c42008-08-13 15:47:33 -07002241 bnx2x_calc_fc_adv(bp);
2242 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00002243 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002244}
2245
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002246static void bnx2x__link_reset(struct bnx2x *bp)
2247{
Eilon Greenstein19680c42008-08-13 15:47:33 -07002248 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002249 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00002250 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002251 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07002252 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00002253 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002254}
2255
2256static u8 bnx2x_link_test(struct bnx2x *bp)
2257{
2258 u8 rc;
2259
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002260 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002261 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002262 bnx2x_release_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002263
2264 return rc;
2265}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002266
/* Initialize the per-port rate-shaping and fairness contexts in
 * bp->cmng from the current line speed.
 *
 * All timer values are expressed in SDM ticks (1 tick = 4 usec).
 * Assumes bp->link_vars.line_speed is non-zero (link up) — it is used
 * as a divisor below.
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;	/* rate in bytes/usec */
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
2301
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		/* function number = 2*vn + port (E1H mapping) */
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 so it still gets a
		 * share once fairness is enabled */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeroes - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
2347
/* Program the per-VN rate-shaping and fairness contexts for function
 * @func into the XSTORM internal memory, based on the min/max bandwidth
 * the MCP configured for that function.
 *
 * Assumes bnx2x_init_port_minmax()/bnx2x_calc_vn_weight_sum() have
 * already populated bp->cmng and bp->vn_weight_sum.
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory (word-by-word register writes) */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
2409
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002410
/* This function is called upon link interrupt: it refreshes the link
 * state from the PHY, updates dropless flow control and statistics,
 * reports the link, and in multi-function mode notifies the other
 * functions on the port and reprograms the rate-shaping contexts.
 */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			/* tell the USTORM firmware whether pause frames
			 * may be generated for this port */
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
2482
/* Refresh the cached link state (polling path, not the link interrupt)
 * and report it. No-op unless the device is fully up and the function
 * is not disabled by the MCP.
 */
static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}
2500
/* Take over the Port Management Function (PMF) role: mark this function
 * as PMF, enable NIG attention for it in the HC edge registers, and let
 * the statistics state machine know.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention: bit (VN + 4) selects this VN's NIG line */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2516
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002517/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002518
2519/* slow path */
2520
2521/*
2522 * General service functions
2523 */
2524
/* send the MCP a request, block until there is a reply */
/* Returns the FW response code (FW_MSG_CODE_MASK bits), or 0 on
 * timeout / sequence mismatch. Serialized by bp->fw_mb_mutex; sleeps,
 * so must be called from process context.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;	/* sequence number tags this request */
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2563
2564static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
Michael Chane665bfd2009-10-10 13:46:54 +00002565static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002566static void bnx2x_set_rx_mode(struct net_device *dev);
2567
2568static void bnx2x_e1h_disable(struct bnx2x *bp)
2569{
2570 int port = BP_PORT(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002571
2572 netif_tx_disable(bp->dev);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002573
2574 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2575
Eilon Greenstein2691d512009-08-12 08:22:08 +00002576 netif_carrier_off(bp->dev);
2577}
2578
2579static void bnx2x_e1h_enable(struct bnx2x *bp)
2580{
2581 int port = BP_PORT(bp);
2582
2583 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2584
Eilon Greenstein2691d512009-08-12 08:22:08 +00002585 /* Tx queue should be only reenabled */
2586 netif_tx_wake_all_queues(bp->dev);
2587
Eilon Greenstein061bc702009-10-15 00:18:47 -07002588 /*
2589 * Should not call netif_carrier_on since it will be called if the link
2590 * is up when checking for link state
2591 */
Eilon Greenstein2691d512009-08-12 08:22:08 +00002592}
2593
/* Recompute and reprogram the rate-shaping/fairness (min/max bandwidth)
 * configuration after an MCP bandwidth-allocation event. Only the PMF
 * notifies the other functions and writes the per-port context.
 */
static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}
2627
/* Handle a DCC (Dynamic Configuration Change) event from the MCP:
 * enable/disable this PF and/or re-apply bandwidth allocation, then
 * acknowledge the outcome to the MCP. Unhandled event bits are
 * reported as DCC_FAILURE.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		/* clear the handled bit so only unhandled ones remain */
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}
2664
Michael Chan28912902009-10-10 13:46:53 +00002665/* must be called under the spq lock */
2666static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2667{
2668 struct eth_spe *next_spe = bp->spq_prod_bd;
2669
2670 if (bp->spq_prod_bd == bp->spq_last_bd) {
2671 bp->spq_prod_bd = bp->spq;
2672 bp->spq_prod_idx = 0;
2673 DP(NETIF_MSG_TIMER, "end of spq\n");
2674 } else {
2675 bp->spq_prod_bd++;
2676 bp->spq_prod_idx++;
2677 }
2678 return next_spe;
2679}
2680
/* must be called under the spq lock */
/* Publish the new slow-path producer index to the XSTORM firmware.
 * The wmb()/mmiowb() pair orders the BD writes before the producer
 * update and the producer write before subsequent MMIO — do not
 * reorder these statements.
 */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	mmiowb();
}
2693
/* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post one slow-path queue entry for @command on connection @cid with
 * the given 64-bit data. @common marks a common (non-per-connection)
 * ramrod. Returns 0 on success, -EBUSY (after panicking) when the SPQ
 * is full, -EIO when the driver is already in panic state.
 */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2740
2741/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002742static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002743{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002744 u32 i, j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002745 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002746
2747 might_sleep();
2748 i = 100;
2749 for (j = 0; j < i*10; j++) {
2750 val = (1UL << 31);
2751 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2752 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2753 if (val & (1L << 31))
2754 break;
2755
2756 msleep(5);
2757 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002758 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002759 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002760 rc = -EBUSY;
2761 }
2762
2763 return rc;
2764}
2765
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002766/* release split MCP access lock register */
2767static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002768{
2769 u32 val = 0;
2770
2771 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2772}
2773
/* Sample the default status block indices written by the chip and
 * update the driver's cached copies.
 *
 * Returns a bitmask of which indices changed: 1 = attention,
 * 2 = CSTORM, 4 = USTORM, 8 = XSTORM, 16 = TSTORM.
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
2802
2803/*
2804 * slow path service functions
2805 */
2806
/* Handle newly asserted attention bits: mask them in the AEU (under the
 * per-port HW lock), record them in bp->attn_state, service the
 * hard-wired attentions (NIG/link, timers, GPIOs, general attentions),
 * and finally acknowledge them to the HC.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	/* a bit asserted twice without a deassert in between indicates
	 * an IGU problem */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	/* mask the newly asserted bits until they are deasserted */
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2902
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00002903static inline void bnx2x_fan_failure(struct bnx2x *bp)
2904{
2905 int port = BP_PORT(bp);
2906
2907 /* mark the failure */
2908 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2909 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2910 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2911 bp->link_params.ext_phy_config);
2912
2913 /* log the failure */
Joe Perches7995c642010-02-17 15:01:52 +00002914 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
2915 "Please contact Dell Support for assistance.\n");
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00002916}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002917
/* Handle deasserted attentions routed through AEU group 0: SPIO5
 * (fan failure), GPIO3 (module detect) and the fatal set-0 HW blocks.
 * Register read-modify-write order is significant - do not reorder.
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	/* per-port AEU enable register for attention group 0 */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* mask SPIO5 so this attention cannot re-fire */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		/* mark the failure in shmem and log it */
		bnx2x_fan_failure(bp);
	}

	/* GPIO3 on either function: module detect interrupt, serviced
	 * under the PHY lock */
	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the fatal bits at the AEU, then bring the driver
		 * down hard */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
2981
2982static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2983{
2984 u32 val;
2985
Eilon Greenstein0626b892009-02-12 08:38:14 +00002986 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002987
2988 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2989 BNX2X_ERR("DB hw attention 0x%x\n", val);
2990 /* DORQ discard attention */
2991 if (val & 0x2)
2992 BNX2X_ERR("FATAL error from DORQ\n");
2993 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002994
2995 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2996
2997 int port = BP_PORT(bp);
2998 int reg_offset;
2999
3000 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3001 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3002
3003 val = REG_RD(bp, reg_offset);
3004 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3005 REG_WR(bp, reg_offset, val);
3006
3007 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00003008 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003009 bnx2x_panic();
3010 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003011}
3012
/* Handle deasserted attentions routed through AEU group 2: CFC and PXP
 * interrupt status (read-to-clear) plus the fatal set-2 HW blocks.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		/* read clears the CFC interrupt status */
		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		/* read clears the PXP interrupt status */
		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		/* per-port AEU enable register for this group */
		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the fatal bits, then bring the driver down hard */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
3052
/* Handle deasserted attentions from group 3: general attentions (PMF
 * link assert, storm microcode asserts, MCP assert) and latched
 * attentions (GRC timeout / reserved).
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* clear the per-function general attention */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* refresh MF config and driver status from shmem */
			bp->mf_config = SHMEM_RD(bp,
					mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			/* become PMF if the MCP nominated this function */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			/* storm microcode assert: clear the general
			 * attentions and crash the driver */
			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			/* management CPU assert: dump its firmware state */
			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* detail register only exists on E1H */
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* clear all latched attention signals */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3107
/* Dispatch all newly-deasserted attention groups: read the after-invert
 * AEU signals, run the per-group handlers, unmask the bits at the HC and
 * AEU, and update the software attention state.  Lock ordering (ALR,
 * then the per-port attention-mask HW lock) is significant.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	/* each deasserted group is filtered through its own signal mask */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	/* clear the handled attention bits at the host coalescing block */
	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* re-enable the deasserted lines in the AEU mask under the
	 * per-port attention-mask HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3186
3187static void bnx2x_attn_int(struct bnx2x *bp)
3188{
3189 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003190 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3191 attn_bits);
3192 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3193 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003194 u32 attn_state = bp->attn_state;
3195
3196 /* look for changed bits */
3197 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3198 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3199
3200 DP(NETIF_MSG_HW,
3201 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3202 attn_bits, attn_ack, asserted, deasserted);
3203
3204 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003205 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003206
3207 /* handle bits that were raised */
3208 if (asserted)
3209 bnx2x_attn_int_asserted(bp, asserted);
3210
3211 if (deasserted)
3212 bnx2x_attn_int_deasserted(bp, deasserted);
3213}
3214
/* Slow-path work handler: scans the default status block for updated
 * indices, services HW attentions, then acks every storm index - only
 * the final ack re-enables the IGU interrupt.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;


	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	/* bitmask of status-block indices that changed */
	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* ack each storm index; the last ack (TSTORM) re-enables ints */
	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}
3249
/* MSI-X slow-path interrupt handler: disables the status-block interrupt,
 * forwards the event to CNIC (if built in) and defers the real work to
 * the slow-path workqueue.
 */
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* disable further default-SB interrupts until the work runs */
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* RCU protects the cnic_ops pointer against unregister */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3283
3284/* end of slow path */
3285
3286/* Statistics */
3287
3288/****************************************************************************
3289* Macros
3290****************************************************************************/
3291
/* 64-bit statistics helpers.  The hardware/firmware counters are kept as
 * {hi, lo} 32-bit pairs; these macros implement add/subtract with manual
 * carry/borrow.  The UPDATE_* macros expect the caller to have suitably
 * named locals in scope (diff, new, old, pstats, estats, qstats,
 * tclient/uclient/xclient and their old_* counterparts).
 */

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

/* fold the delta of MAC counter s into cumulative stat t (both the
 * latest snapshot in mac_stx[0] and the running total in mac_stx[1]) */
#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

/* fold the delta of NIG counter s into ethtool stat t */
#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

/* extend 32-bit MAC counter s into the 64-bit running total */
#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

/* per-storm little-endian counters: take the delta against the cached
 * old value, refresh the cache, and extend into queue stat t */
#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

/* subtract the delta of storm counter s from queue stat t (does NOT
 * refresh the cached old value - caller's responsibility) */
#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
3397
3398/*
3399 * General service functions
3400 */
3401
3402static inline long bnx2x_hilo(u32 *hiref)
3403{
3404 u32 lo = *(hiref + 1);
3405#if (BITS_PER_LONG == 64)
3406 u32 hi = *hiref;
3407
3408 return HILO_U64(hi, lo);
3409#else
3410 return lo;
3411#endif
3412}
3413
3414/*
3415 * Init service functions
3416 */
3417
/* Post a statistics-query ramrod to the firmware covering every queue
 * (and the port, when this function is the PMF).  No-op while a previous
 * query is still pending.
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		/* drv_counter lets the completion be matched to this query */
		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		/* request stats for every active client id */
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq -
			 * give the consumed credit back */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003439
/* Kick the DMAE engine to collect the HW statistics programmed into
 * bp->dmae[]: either via the loader command (when a command chain was
 * built, bp->executer_idx != 0) or directly for the single function-stats
 * command.  Completion is signalled by *stats_comp changing to
 * DMAE_COMP_VAL.
 */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	/* emulation/FPGA chips: skip the DMAE entirely */
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		/* the loader copies the first prepared command from host
		 * memory into the DMAE command memory and triggers it */
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		/* E1 takes one dword less - NOTE(review): presumably a
		 * chip-specific command size, confirm against HW docs */
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
3487
3488static int bnx2x_stats_comp(struct bnx2x *bp)
3489{
3490 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3491 int cnt = 10;
3492
3493 might_sleep();
3494 while (*stats_comp != DMAE_COMP_VAL) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003495 if (!cnt) {
3496 BNX2X_ERR("timeout waiting for stats finished\n");
3497 break;
3498 }
3499 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -07003500 msleep(1);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003501 }
3502 return 1;
3503}
3504
3505/*
3506 * Statistics service functions
3507 */
3508
/* On becoming PMF in a multi-function setup: read the accumulated port
 * statistics back from their GRC location into host memory with two
 * chained DMAE commands (the block is larger than one DMAE read), then
 * wait for completion.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* common GRC->PCI read opcode; endianness swap chosen at build */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	/* first command: read the maximum DMAE chunk, completion to GRC
	 * triggers the next loader slot */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second command: read the remainder, completion to host memory
	 * signals the whole transfer is done */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3563
3564static void bnx2x_port_stats_init(struct bnx2x *bp)
3565{
3566 struct dmae_command *dmae;
3567 int port = BP_PORT(bp);
3568 int vn = BP_E1HVN(bp);
3569 u32 opcode;
3570 int loader_idx = PMF_DMAE_C(bp);
3571 u32 mac_addr;
3572 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3573
3574 /* sanity */
3575 if (!bp->link_vars.link_up || !bp->port.pmf) {
3576 BNX2X_ERR("BUG!\n");
3577 return;
3578 }
3579
3580 bp->executer_idx = 0;
3581
3582 /* MCP */
3583 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3584 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3585 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3586#ifdef __BIG_ENDIAN
3587 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3588#else
3589 DMAE_CMD_ENDIANITY_DW_SWAP |
3590#endif
3591 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3592 (vn << DMAE_CMD_E1HVN_SHIFT));
3593
3594 if (bp->port.port_stx) {
3595
3596 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3597 dmae->opcode = opcode;
3598 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3599 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3600 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3601 dmae->dst_addr_hi = 0;
3602 dmae->len = sizeof(struct host_port_stats) >> 2;
3603 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3604 dmae->comp_addr_hi = 0;
3605 dmae->comp_val = 1;
3606 }
3607
3608 if (bp->func_stx) {
3609
3610 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3611 dmae->opcode = opcode;
3612 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3613 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3614 dmae->dst_addr_lo = bp->func_stx >> 2;
3615 dmae->dst_addr_hi = 0;
3616 dmae->len = sizeof(struct host_func_stats) >> 2;
3617 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3618 dmae->comp_addr_hi = 0;
3619 dmae->comp_val = 1;
3620 }
3621
3622 /* MAC */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003623 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3624 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3625 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3626#ifdef __BIG_ENDIAN
3627 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3628#else
3629 DMAE_CMD_ENDIANITY_DW_SWAP |
3630#endif
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003631 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3632 (vn << DMAE_CMD_E1HVN_SHIFT));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003633
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07003634 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003635
3636 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3637 NIG_REG_INGRESS_BMAC0_MEM);
3638
3639 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3640 BIGMAC_REGISTER_TX_STAT_GTBYT */
3641 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3642 dmae->opcode = opcode;
3643 dmae->src_addr_lo = (mac_addr +
3644 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3645 dmae->src_addr_hi = 0;
3646 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3647 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3648 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3649 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3650 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3651 dmae->comp_addr_hi = 0;
3652 dmae->comp_val = 1;
3653
3654 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3655 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3656 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3657 dmae->opcode = opcode;
3658 dmae->src_addr_lo = (mac_addr +
3659 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3660 dmae->src_addr_hi = 0;
3661 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003662 offsetof(struct bmac_stats, rx_stat_gr64_lo));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003663 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003664 offsetof(struct bmac_stats, rx_stat_gr64_lo));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003665 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3666 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3667 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3668 dmae->comp_addr_hi = 0;
3669 dmae->comp_val = 1;
3670
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07003671 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003672
3673 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3674
3675 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3676 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3677 dmae->opcode = opcode;
3678 dmae->src_addr_lo = (mac_addr +
3679 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3680 dmae->src_addr_hi = 0;
3681 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3682 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3683 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3684 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3685 dmae->comp_addr_hi = 0;
3686 dmae->comp_val = 1;
3687
3688 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3689 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3690 dmae->opcode = opcode;
3691 dmae->src_addr_lo = (mac_addr +
3692 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3693 dmae->src_addr_hi = 0;
3694 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003695 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003696 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003697 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003698 dmae->len = 1;
3699 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3700 dmae->comp_addr_hi = 0;
3701 dmae->comp_val = 1;
3702
3703 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3704 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3705 dmae->opcode = opcode;
3706 dmae->src_addr_lo = (mac_addr +
3707 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3708 dmae->src_addr_hi = 0;
3709 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003710 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003711 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003712 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003713 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3714 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3715 dmae->comp_addr_hi = 0;
3716 dmae->comp_val = 1;
3717 }
3718
3719 /* NIG */
3720 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003721 dmae->opcode = opcode;
3722 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3723 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3724 dmae->src_addr_hi = 0;
3725 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3726 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3727 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3728 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3729 dmae->comp_addr_hi = 0;
3730 dmae->comp_val = 1;
3731
3732 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3733 dmae->opcode = opcode;
3734 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3735 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3736 dmae->src_addr_hi = 0;
3737 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3738 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3739 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3740 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3741 dmae->len = (2*sizeof(u32)) >> 2;
3742 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3743 dmae->comp_addr_hi = 0;
3744 dmae->comp_val = 1;
3745
3746 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003747 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3748 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3749 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3750#ifdef __BIG_ENDIAN
3751 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3752#else
3753 DMAE_CMD_ENDIANITY_DW_SWAP |
3754#endif
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003755 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3756 (vn << DMAE_CMD_E1HVN_SHIFT));
3757 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3758 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003759 dmae->src_addr_hi = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003760 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3761 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3762 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3763 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3764 dmae->len = (2*sizeof(u32)) >> 2;
3765 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3766 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3767 dmae->comp_val = DMAE_COMP_VAL;
3768
3769 *stats_comp = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003770}
3771
/* Program the single DMAE command that copies this function's
 * statistics block (func_stats, in host memory) out to the device
 * address held in bp->func_stx.  The command itself is only prepared
 * here; it is posted later (see bnx2x_stats_start()).
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity: without a function-stats device address there is
	 * nothing to program */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* direction is host (PCI) -> device (GRC); completion is written
	 * back to the PCI-side stats_comp word */
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	/* device-side addresses and lengths are in dwords, hence >> 2 */
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* clear the completion word so a later poll can detect DMAE_COMP_VAL */
	*stats_comp = 0;
}
3807
/* Begin a statistics collection cycle.  The PMF (port management
 * function) programs the full port statistics DMAE sequence; a non-PMF
 * function with a valid func_stx programs only its own function block.
 * In either case both the hardware (DMAE) and storm (firmware) stats
 * requests are then posted.
 */
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
3819
/* Transition into PMF statistics mode: finish any outstanding stats
 * operation (bnx2x_stats_comp), pull the current PMF-side counters
 * (bnx2x_stats_pmf_update), then kick off a fresh collection cycle.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
3826
/* Restart statistics collection: wait out the pending operation via
 * bnx2x_stats_comp(), then start a new cycle.
 */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003832
/* Fold the freshly DMAE'd BMAC hardware counters (mac_stats.bmac_stats)
 * into the accumulated port statistics (port_stats) and derive the
 * pause-frame totals in bp->eth_stats.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch pair - presumably consumed inside the UPDATE_STAT64
	 * macro below; confirm against the macro definition */
	struct {
		u32 lo;
		u32 hi;
	} diff;

	/* each UPDATE_STAT64 extends a 64-bit HW counter (left name,
	 * from 'new') into the matching pstats field (right name) */
	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	/* grxpf feeds two accumulators: xoff-state and the BMAC xpf copy */
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	/* expose pause counters from the second mac_stx entry */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
3883
/* Fold the freshly DMAE'd EMAC hardware counters (mac_stats.emac_stats)
 * into the accumulated port statistics and derive the pause-frame
 * totals (xon + xoff) in bp->eth_stats.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	/* each UPDATE_EXTEND_STAT extends the named 32-bit HW counter
	 * (from 'new') into the matching 64-bit pstats accumulator */
	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause-frames received = xon + xoff pause frames */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause-frames sent = xon + xoff sent */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
3940
/* Update port statistics from the hardware counters that were DMAE'd
 * into host memory: dispatch to the active MAC's update routine, fold
 * in the NIG counters, mirror the results into bp->eth_stats, and
 * check the NIG timer watermark from shared memory.
 *
 * Returns 0 on success, -1 if no MAC is active (should not happen
 * while stats are being DMAE'd).
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	/* scratch pair - presumably consumed inside UPDATE_STAT64_NIG
	 * below; confirm against the macro definition */
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	/* accumulate the delta of the NIG discard/truncate counters
	 * since the previous snapshot */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	/* remember this snapshot for the next delta computation */
	memcpy(old, new, sizeof(struct nig_stats));

	/* mirror the accumulated MAC block into the ethtool stats */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	/* bump the start/end markers together to flag a consistent block */
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
3990
/* Update driver statistics from the firmware (storm) per-client
 * counters in fw_stats.  For every queue, first validate that all
 * three storms (x/t/u) have posted counters for the current stats
 * cycle; if any is stale the whole update is abandoned (negative
 * return) and retried on the next cycle.  Otherwise per-queue byte and
 * packet totals are rebuilt and folded into the per-function totals
 * (fstats) and then into bp->eth_stats.
 *
 * Returns 0 on success, -1/-2/-4 when the x/t/u storm counters
 * (respectively) are not yet updated.
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	/* start from the saved baseline; the first two u32s
	 * (host_func_stats_{start,end} markers) are excluded */
	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid?  each storm stamps its block with
		 * (stats_counter + 1) when the update is complete */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		/* rx bytes = broadcast + multicast + unicast */
		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		/* valid bytes snapshot taken before error bytes are added */
		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		/* packets dropped for lack of buffers were counted as
		 * received by the tstorm; subtract them from the rx
		 * totals and account them as no-buff discards instead */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* tx bytes = unicast + multicast + broadcast */
		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		/* fold this queue's counters into the function totals */
		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	/* bad octets counted by the MAC are part of total received bytes */
	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* publish function totals into the ethtool stats block
	 * (again excluding the two marker u32s) */
	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* per-port discard counters are only meaningful on the PMF */
	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	/* bump the start/end markers together to flag a consistent block */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
4205
/* Translate the driver's accumulated 64-bit statistics (bp->eth_stats)
 * into the standard netdev counters in bp->dev->stats, combining the
 * hi/lo halves via bnx2x_hilo().
 */
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	/* rx_dropped = MAC discards + per-queue checksum discards */
	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	/* length errors cover both undersized and oversized frames */
	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	/* rx_errors is the sum of the rx error categories above */
	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
4271
4272static void bnx2x_drv_stats_update(struct bnx2x *bp)
4273{
4274 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4275 int i;
4276
4277 estats->driver_xoff = 0;
4278 estats->rx_err_discard_pkt = 0;
4279 estats->rx_skb_alloc_failed = 0;
4280 estats->hw_csum_err = 0;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004281 for_each_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +00004282 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4283
4284 estats->driver_xoff += qstats->driver_xoff;
4285 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4286 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4287 estats->hw_csum_err += qstats->hw_csum_err;
4288 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004289}
4290
/* Periodic statistics refresh (ENABLED-state UPDATE handler).
 * Pulls HW/storm statistics into the driver structures, then re-arms the
 * next collection.  Bails out early if the previous DMAE statistics
 * transfer has not completed yet.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* previous DMAE transfer still in flight - try again next tick */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	/* only the PMF collects the port (MAC/NIG) statistics */
	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* storm stats carry a counter; if they stay stale for 3 calls in a
	 * row something is badly wrong with the FW - panic */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* verbose per-queue debug dump, only when NETIF_MSG_TIMER is set */
	if (netif_msg_timer(bp)) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = bp->fp;
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		netdev_printk(KERN_DEBUG, bp->dev, "\n");
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
				  " tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
				  " rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %lu no_buff_discard %lu "
			"mac_discard %u mac_filter_discard %u "
			"xxovrflow_discard %u brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	/* kick off the next collection cycle */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004358
/* Build the DMAE command(s) that flush the final port and function
 * statistics out to their shared-memory areas (port_stx/func_stx) when
 * statistics collection stops.  Commands are only queued here; the
 * caller posts them with bnx2x_hw_stats_post().
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* common opcode bits: PCI -> GRC copy; completion target is
	 * filled in per command below */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* if a function-stats command follows, chain to it via the
		 * DMAE loader; otherwise complete to host memory */
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			/* clear the completion word the HW will write */
			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		/* last command in the chain - always completes to host */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
4422
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004423static void bnx2x_stats_stop(struct bnx2x *bp)
4424{
4425 int update = 0;
4426
4427 bnx2x_stats_comp(bp);
4428
4429 if (bp->port.pmf)
4430 update = (bnx2x_hw_stats_update(bp) == 0);
4431
4432 update |= (bnx2x_storm_stats_update(bp) == 0);
4433
4434 if (update) {
4435 bnx2x_net_stats_update(bp);
4436
4437 if (bp->port.pmf)
4438 bnx2x_port_stats_stop(bp);
4439
4440 bnx2x_hw_stats_post(bp);
4441 bnx2x_stats_comp(bp);
4442 }
4443}
4444
/* No-op handler for statistics state-machine transitions that require
 * no work (UPDATE/STOP events while statistics are DISABLED).
 */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
4448
/* Statistics state machine: indexed by [current state][event], each
 * entry gives the handler to run and the state to enter afterwards.
 * Driven by bnx2x_stats_handle().
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
4467
4468static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4469{
4470 enum bnx2x_stats_state state = bp->stats_state;
4471
4472 bnx2x_stats_stm[state][event].action(bp);
4473 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4474
Eilon Greenstein89246652009-08-12 08:23:56 +00004475 /* Make sure the state has been "changed" */
4476 smp_wmb();
4477
Joe Perches7995c642010-02-17 15:01:52 +00004478 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004479 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4480 state, event, bp->stats_state);
4481}
4482
/* Write the driver's (zeroed) port statistics buffer out to the shared
 * memory area at port_stx via a single DMAE transaction, establishing
 * the baseline.  PMF-only; synchronous - waits for DMAE completion.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* PCI -> GRC copy of host_port_stats, completing to host memory */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4520
/* Initialize the function statistics area of every vnic on this port.
 * Temporarily retargets bp->func_stx at each function's shared-memory
 * slot, runs the init + DMAE post for it, then restores our own value.
 * PMF-only.
 */
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		/* function number = 2*vn + port (two ports per chip) */
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}
4549
/* Read back the function statistics base from shared memory (func_stx)
 * into func_stats_base via DMAE - used by non-PMF functions to pick up
 * the values already accumulated there.  Synchronous.
 */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* GRC -> PCI copy (opposite direction of the *_stop/init paths) */
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4587
/* One-time statistics initialization: discover the shared-memory stats
 * areas, snapshot the NIG baseline counters, zero all per-queue and
 * device-wide software statistics, and seed the shared-memory areas
 * (PMF) or read them back (non-PMF).
 */
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		/* no management FW - no shared-memory stats areas */
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats: capture current NIG counters as the baseline for
	 * later delta computation */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		/* PMF seeds the shared-memory areas with zeroed stats */
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		/* non-PMF picks up what is already there */
		bnx2x_func_stats_base_update(bp);
}
4649
/* Periodic driver timer: optionally polls the rings (module "poll"
 * parameter), exchanges the heartbeat pulse with management FW, and
 * kicks the statistics state machine.  Always re-arms itself.
 */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	/* interrupts disabled - skip the work but keep the timer alive */
	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4698
4699/* end of Statistics */
4700
4701/* nic init */
4702
4703/*
4704 * nic init service functions
4705 */
4706
/* Zero the USTORM and CSTORM host status block areas for one status
 * block id (both live in CSTORM fast memory on this FW).
 */
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}
4719
/* Program one fastpath status block: point the USTORM and CSTORM parts
 * of the FW at the host DMA address of @sb, tag them with @sb_id/func,
 * disable host coalescing on every index, and ACK/enable the IGU entry.
 */
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	/* low/high halves of the 64-bit host address */
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	/* start with host coalescing disabled on all indices */
	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4764
/* Zero all four storm-side areas of this function's default status
 * block (TSTORM, CSTORM U+C, XSTORM).
 */
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
4782
/* Program the default (slowpath) status block: attention section plus
 * the per-storm U/C/T/X sections.  Caches the attention-group masks,
 * points the HC at the host DMA address of each section, disables host
 * coalescing on every index, and enables the block in the IGU.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	/* cache the AEU enable masks for each dynamic attention group;
	 * each group spans four consecutive 32-bit registers */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	/* host coalescing disabled on all indices of every section */
	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4898
/* Push the current rx_ticks/tx_ticks interrupt-coalescing settings into
 * the host-coalescing timeout registers of every queue's status block.
 * A timeout of zero disables coalescing on that index.
 */
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}
4928
/* Release the first @last entries of a fastpath's TPA skb pool.
 * Entries whose aggregation was started (BNX2X_TPA_START) still hold a
 * live DMA mapping, which must be unmapped before the skb is freed.
 */
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		/* unmap before freeing - only START bins are mapped */
		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
4952
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004953static void bnx2x_init_rx_rings(struct bnx2x *bp)
4954{
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004955 int func = BP_FUNC(bp);
Eilon Greenstein32626232008-08-13 15:51:07 -07004956 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4957 ETH_MAX_AGGREGATION_QUEUES_E1H;
4958 u16 ring_prod, cqe_ring_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004959 int i, j;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004960
Eilon Greenstein87942b42009-02-12 08:36:49 +00004961 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
Eilon Greenstein0f008462009-02-12 08:36:18 +00004962 DP(NETIF_MSG_IFUP,
4963 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004964
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004965 if (bp->flags & TPA_ENABLE_FLAG) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004966
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004967 for_each_queue(bp, j) {
Eilon Greenstein32626232008-08-13 15:51:07 -07004968 struct bnx2x_fastpath *fp = &bp->fp[j];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004969
Eilon Greenstein32626232008-08-13 15:51:07 -07004970 for (i = 0; i < max_agg_queues; i++) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004971 fp->tpa_pool[i].skb =
4972 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4973 if (!fp->tpa_pool[i].skb) {
4974 BNX2X_ERR("Failed to allocate TPA "
4975 "skb pool for queue[%d] - "
4976 "disabling TPA on this "
4977 "queue!\n", j);
4978 bnx2x_free_tpa_pool(bp, fp, i);
4979 fp->disable_tpa = 1;
4980 break;
4981 }
4982 pci_unmap_addr_set((struct sw_rx_bd *)
4983 &bp->fp->tpa_pool[i],
4984 mapping, 0);
4985 fp->tpa_state[i] = BNX2X_TPA_STOP;
4986 }
4987 }
4988 }
4989
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004990 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004991 struct bnx2x_fastpath *fp = &bp->fp[j];
4992
4993 fp->rx_bd_cons = 0;
4994 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004995 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004996
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004997 /* "next page" elements initialization */
4998 /* SGE ring */
4999 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5000 struct eth_rx_sge *sge;
5001
5002 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5003 sge->addr_hi =
5004 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5005 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5006 sge->addr_lo =
5007 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5008 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5009 }
5010
5011 bnx2x_init_sge_ring_bit_mask(fp);
5012
5013 /* RX BD ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005014 for (i = 1; i <= NUM_RX_RINGS; i++) {
5015 struct eth_rx_bd *rx_bd;
5016
5017 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5018 rx_bd->addr_hi =
5019 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005020 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005021 rx_bd->addr_lo =
5022 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005023 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005024 }
5025
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005026 /* CQ ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005027 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5028 struct eth_rx_cqe_next_page *nextpg;
5029
5030 nextpg = (struct eth_rx_cqe_next_page *)
5031 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5032 nextpg->addr_hi =
5033 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005034 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005035 nextpg->addr_lo =
5036 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005037 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005038 }
5039
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005040 /* Allocate SGEs and initialize the ring elements */
5041 for (i = 0, ring_prod = 0;
5042 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005043
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005044 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5045 BNX2X_ERR("was only able to allocate "
5046 "%d rx sges\n", i);
5047 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5048 /* Cleanup already allocated elements */
5049 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
Eilon Greenstein32626232008-08-13 15:51:07 -07005050 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005051 fp->disable_tpa = 1;
5052 ring_prod = 0;
5053 break;
5054 }
5055 ring_prod = NEXT_SGE_IDX(ring_prod);
5056 }
5057 fp->rx_sge_prod = ring_prod;
5058
5059 /* Allocate BDs and initialize BD ring */
Yitchak Gertner66e855f2008-08-13 15:49:05 -07005060 fp->rx_comp_cons = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005061 cqe_ring_prod = ring_prod = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005062 for (i = 0; i < bp->rx_ring_size; i++) {
5063 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5064 BNX2X_ERR("was only able to allocate "
Eilon Greensteinde832a52009-02-12 08:36:33 +00005065 "%d rx skbs on queue[%d]\n", i, j);
5066 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005067 break;
5068 }
5069 ring_prod = NEXT_RX_IDX(ring_prod);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005070 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
Ilpo Järvinen53e5e962008-07-25 21:40:45 -07005071 WARN_ON(ring_prod <= i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005072 }
5073
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005074 fp->rx_bd_prod = ring_prod;
5075 /* must not have more available CQEs than BDs */
5076 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5077 cqe_ring_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005078 fp->rx_pkt = fp->rx_calls = 0;
5079
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005080 /* Warning!
5081 * this will generate an interrupt (to the TSTORM)
5082 * must only be done after chip is initialized
5083 */
5084 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5085 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005086 if (j != 0)
5087 continue;
5088
5089 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005090 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005091 U64_LO(fp->rx_comp_mapping));
5092 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005093 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005094 U64_HI(fp->rx_comp_mapping));
5095 }
5096}
5097
5098static void bnx2x_init_tx_ring(struct bnx2x *bp)
5099{
5100 int i, j;
5101
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005102 for_each_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005103 struct bnx2x_fastpath *fp = &bp->fp[j];
5104
5105 for (i = 1; i <= NUM_TX_RINGS; i++) {
Eilon Greensteinca003922009-08-12 22:53:28 -07005106 struct eth_tx_next_bd *tx_next_bd =
5107 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005108
Eilon Greensteinca003922009-08-12 22:53:28 -07005109 tx_next_bd->addr_hi =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005110 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005111 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eilon Greensteinca003922009-08-12 22:53:28 -07005112 tx_next_bd->addr_lo =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005113 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005114 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005115 }
5116
Eilon Greensteinca003922009-08-12 22:53:28 -07005117 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5118 fp->tx_db.data.zero_fill1 = 0;
5119 fp->tx_db.data.prod = 0;
5120
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005121 fp->tx_pkt_prod = 0;
5122 fp->tx_pkt_cons = 0;
5123 fp->tx_bd_prod = 0;
5124 fp->tx_bd_cons = 0;
5125 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5126 fp->tx_pkt = 0;
5127 }
5128}
5129
5130static void bnx2x_init_sp_ring(struct bnx2x *bp)
5131{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005132 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005133
5134 spin_lock_init(&bp->spq_lock);
5135
5136 bp->spq_left = MAX_SPQ_PENDING;
5137 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005138 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5139 bp->spq_prod_bd = bp->spq;
5140 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5141
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005142 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005143 U64_LO(bp->spq_mapping));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005144 REG_WR(bp,
5145 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005146 U64_HI(bp->spq_mapping));
5147
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005148 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005149 bp->spq_prod_idx);
5150}
5151
/* Fill in the per-connection ETH context for every fastpath queue.
 * The context lives in the slow-path memory region (bnx2x_sp) and is
 * consumed by the USTORM (Rx), CSTORM/XSTORM (Tx) firmware processors.
 */
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		/* Basic Rx client setup: status-block binding, statistics
		 * client and Rx buffer alignment/size.
		 */
		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			/* TPA (LRO) enabled on this queue: program the SGE
			 * ring base, SGE buffer size (clamped to 16 bits)
			 * and the max SGEs per packet rounded up to a whole
			 * number of SGE pages.
			 */
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		/* CDU validation values for the aggregation contexts */
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		/* Tx completion index in the status block */
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		/* Tx BD ring base and per-client statistics enable */
		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}
5227
5228static void bnx2x_init_ind_table(struct bnx2x *bp)
5229{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005230 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005231 int i;
5232
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005233 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005234 return;
5235
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005236 DP(NETIF_MSG_IFUP,
5237 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005238 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005239 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005240 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005241 bp->fp->cl_id + (i % bp->num_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005242}
5243
/* Program the per-client TSTORM configuration (MTU, statistics and VLAN
 * stripping flags) for every fastpath queue of this port.  The 8-byte
 * config struct is written to internal memory as two 32-bit words.
 */
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	/* HW VLAN stripping only when Rx is active and a vlan group is set */
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		/* each client counts statistics into its own counter */
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		/* write the struct as two dwords (low, high) */
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
5276
/* Translate the driver's rx_mode (none/normal/allmulti/promisc) into the
 * TSTORM MAC filter configuration and the NIG LLH drive mask, then push
 * both to the hardware.  Also refreshes the per-client config unless Rx
 * is being turned off completely.
 */
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		/* drop everything for the clients in 'mask' */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		/* unicast filtered by MAC; accept all broadcast */
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	/* select the per-port LLH drive-mask register */
	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	/* copy the filter config to TSTORM internal memory, dword by dword */
	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
5339
Eilon Greenstein471de712008-08-13 15:49:35 -07005340static void bnx2x_init_internal_common(struct bnx2x *bp)
5341{
5342 int i;
5343
5344 /* Zero this manually as its initialization is
5345 currently missing in the initTool */
5346 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5347 REG_WR(bp, BAR_USTRORM_INTMEM +
5348 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5349}
5350
/* Per-port internal memory init: program the host-coalescing baseline
 * timer resolution (BTR) for each storm processor serving this port.
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
5362
/* Per-function internal memory init.  Programs the TSTORM common config
 * (RSS/TPA/E1H-OV), resets all per-client statistics counters, points the
 * storm processors at the fw_stats buffer, configures the CQE page bases
 * and aggregation sizes, the dropless-flow-control thresholds (E1H) and
 * the rate-shaping/fairness (cmng) context.  Runs on every function load.
 */
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		/* RSS enabled: set hash flags and result mask */
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	/* Zero all per-client statistics in the X/T/U storm areas */
	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	/* Point X/T/U storms at the fw_stats DMA buffer (low/high dwords) */
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		/* tell each storm whether we run in multi-function mode */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		/* NOTE(review): threshold values appear to be in ring
		 * entries — confirm against firmware HSI before changing */
		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			/* SGE thresholds only matter when TPA is active */
			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}


			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}


	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
5577
Eilon Greenstein471de712008-08-13 15:49:35 -07005578static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5579{
5580 switch (load_code) {
5581 case FW_MSG_CODE_DRV_LOAD_COMMON:
5582 bnx2x_init_internal_common(bp);
5583 /* no break */
5584
5585 case FW_MSG_CODE_DRV_LOAD_PORT:
5586 bnx2x_init_internal_port(bp);
5587 /* no break */
5588
5589 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5590 bnx2x_init_internal_func(bp);
5591 break;
5592
5593 default:
5594 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5595 break;
5596 }
5597}
5598
/* Top-level NIC init: set up all fastpath queues and status blocks, then
 * the rings, contexts and internal memory, and finally enable interrupts.
 * The statement order is significant — interrupts are only enabled after
 * everything else is programmed and flushed.
 */
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		/* with CNIC, sb 0 is reserved; shift the ETH sb ids by one */
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
5653
5654/* end of nic init */
5655
5656/*
5657 * gzip service functions
5658 */
5659
5660static int bnx2x_gunzip_init(struct bnx2x *bp)
5661{
5662 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5663 &bp->gunzip_mapping);
5664 if (bp->gunzip_buf == NULL)
5665 goto gunzip_nomem1;
5666
5667 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5668 if (bp->strm == NULL)
5669 goto gunzip_nomem2;
5670
5671 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5672 GFP_KERNEL);
5673 if (bp->strm->workspace == NULL)
5674 goto gunzip_nomem3;
5675
5676 return 0;
5677
5678gunzip_nomem3:
5679 kfree(bp->strm);
5680 bp->strm = NULL;
5681
5682gunzip_nomem2:
5683 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5684 bp->gunzip_mapping);
5685 bp->gunzip_buf = NULL;
5686
5687gunzip_nomem1:
Joe Perches7995c642010-02-17 15:01:52 +00005688 netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005689 return -ENOMEM;
5690}
5691
5692static void bnx2x_gunzip_end(struct bnx2x *bp)
5693{
5694 kfree(bp->strm->workspace);
5695
5696 kfree(bp->strm);
5697 bp->strm = NULL;
5698
5699 if (bp->gunzip_buf) {
5700 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5701 bp->gunzip_mapping);
5702 bp->gunzip_buf = NULL;
5703 }
5704}
5705
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005706static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005707{
5708 int n, rc;
5709
5710 /* check gzip header */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005711 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5712 BNX2X_ERR("Bad gzip header\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005713 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005714 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005715
5716 n = 10;
5717
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005718#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005719
5720 if (zbuf[3] & FNAME)
5721 while ((zbuf[n++] != 0) && (n < len));
5722
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005723 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005724 bp->strm->avail_in = len - n;
5725 bp->strm->next_out = bp->gunzip_buf;
5726 bp->strm->avail_out = FW_BUF_SIZE;
5727
5728 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5729 if (rc != Z_OK)
5730 return rc;
5731
5732 rc = zlib_inflate(bp->strm, Z_FINISH);
5733 if ((rc != Z_OK) && (rc != Z_STREAM_END))
Joe Perches7995c642010-02-17 15:01:52 +00005734 netdev_err(bp->dev, "Firmware decompression error: %s\n",
5735 bp->strm->msg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005736
5737 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5738 if (bp->gunzip_outlen & 0x3)
Joe Perches7995c642010-02-17 15:01:52 +00005739 netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
5740 bp->gunzip_outlen);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005741 bp->gunzip_outlen >>= 2;
5742
5743 zlib_inflateEnd(bp->strm);
5744
5745 if (rc == Z_STREAM_END)
5746 return 0;
5747
5748 return rc;
5749}
5750
5751/* nic load/unload */
5752
5753/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005754 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005755 */
5756
5757/* send a NIG loopback debug packet */
5758static void bnx2x_lb_pckt(struct bnx2x *bp)
5759{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005760 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005761
5762 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005763 wb_write[0] = 0x55555555;
5764 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005765 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005766 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005767
5768 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005769 wb_write[0] = 0x09000000;
5770 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005771 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005772 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005773}
5774
/* Some of the internal memories are not directly readable from the
 * driver; to test them we send debug packets through the NIG loopback
 * and verify that the BRB/PRS/NIG blocks account for them correctly.
 *
 * Returns 0 when the self-test passes, or a negative value identifying
 * the stage that timed out or failed.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* emulation and FPGA platforms run much slower than silicon -
	 * scale the polling budget accordingly */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks so the debug packet
	 * stays inside the blocks under test */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
5926
/* Open the attention interrupt masks of the HW blocks (writing 0 to a
 * block's *_INT_MASK register leaves none of its attention bits masked;
 * see the PBF line below where set bits 3,4 remain masked).
 * The commented-out writes are blocks whose attentions are deliberately
 * left masked.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	/* PXP2 mask differs on FPGA platforms */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
5965
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005966
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00005967static void bnx2x_reset_common(struct bnx2x *bp)
5968{
5969 /* reset_common */
5970 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5971 0xd3ffff7f);
5972 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5973}
5974
Eilon Greenstein573f2032009-08-12 08:24:14 +00005975static void bnx2x_init_pxp(struct bnx2x *bp)
5976{
5977 u16 devctl;
5978 int r_order, w_order;
5979
5980 pci_read_config_word(bp->pdev,
5981 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5982 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5983 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5984 if (bp->mrrs == -1)
5985 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5986 else {
5987 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5988 r_order = bp->mrrs;
5989 }
5990
5991 bnx2x_init_pxp_arb(bp, r_order, w_order);
5992}
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00005993
5994static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5995{
5996 u32 val;
5997 u8 port;
5998 u8 is_required = 0;
5999
6000 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6001 SHARED_HW_CFG_FAN_FAILURE_MASK;
6002
6003 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6004 is_required = 1;
6005
6006 /*
6007 * The fan failure mechanism is usually related to the PHY type since
6008 * the power consumption of the board is affected by the PHY. Currently,
6009 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6010 */
6011 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6012 for (port = PORT_0; port < PORT_MAX; port++) {
6013 u32 phy_type =
6014 SHMEM_RD(bp, dev_info.port_hw_config[port].
6015 external_phy_config) &
6016 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6017 is_required |=
6018 ((phy_type ==
6019 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6020 (phy_type ==
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006021 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6022 (phy_type ==
Eilon Greensteinfd4ef40d2009-07-21 05:47:27 +00006023 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6024 }
6025
6026 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6027
6028 if (is_required == 0)
6029 return;
6030
6031 /* Fan failure is indicated by SPIO 5 */
6032 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6033 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6034
6035 /* set to active low mode */
6036 val = REG_RD(bp, MISC_REG_SPIO_INT);
6037 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6038 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6039 REG_WR(bp, MISC_REG_SPIO_INT, val);
6040
6041 /* enable interrupt to signal the IGU */
6042 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6043 val |= (1 << MISC_REGISTERS_SPIO_5);
6044 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6045}
6046
/* One-time, chip-wide (non per-port) hardware initialization: resets the
 * common blocks, then initializes them in the order the pipeline requires
 * (PXP -> DMAE -> CM/SEM blocks -> QM -> DQ -> BRB/PRS -> SDM/SEM ->
 * PBF/SRC -> CDU/CFC -> HC/NIG), runs the internal-memory self test on
 * first power-up of E1 chips, and finally initializes the common PHY.
 *
 * Returns 0 on success or -EBUSY when a block fails to report init done.
 */
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	/* take all common blocks out of reset */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	/* pulse the LCPLL control register - presumably a PLL reset
	 * sequence; NOTE(review): exact semantics not visible here */
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	/* byte-swap the request queues on big-endian hosts */
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do it's magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	/* initialize the QM base-address and pointer tables for CNIC */
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	/* zero the storms' internal fast memory */
	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	/* fill the searcher RSS key registers with a placeholder pattern */
	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		pr_alert("please adjust the size of cdu_context(%ld)\n",
			 (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	/* these external PHYs require a HW lock around MDIO access */
	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006319
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006320static int bnx2x_init_port(struct bnx2x *bp)
6321{
6322 int port = BP_PORT(bp);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006323 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
Eilon Greenstein1c063282009-02-12 08:36:43 +00006324 u32 low, high;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006325 u32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006326
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006327 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6328
6329 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006330
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006331 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006332 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07006333
6334 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6335 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6336 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006337 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006338
Michael Chan37b091b2009-10-10 13:46:55 +00006339#ifdef BCM_CNIC
6340 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006341
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006342 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
Michael Chan37b091b2009-10-10 13:46:55 +00006343 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6344 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006345#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006346 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00006347
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006348 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00006349 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6350 /* no pause for emulation and FPGA */
6351 low = 0;
6352 high = 513;
6353 } else {
6354 if (IS_E1HMF(bp))
6355 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6356 else if (bp->dev->mtu > 4096) {
6357 if (bp->flags & ONE_PORT_FLAG)
6358 low = 160;
6359 else {
6360 val = bp->dev->mtu;
6361 /* (24*1024 + val*4)/256 */
6362 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6363 }
6364 } else
6365 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6366 high = low + 56; /* 14*1024/256 */
6367 }
6368 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6369 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6370
6371
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006372 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07006373
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006374 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006375 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006376 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006377 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006378
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006379 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6380 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6381 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6382 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006383
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006384 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006385 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006386
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006387 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006388
6389 /* configure PBF to work without PAUSE mtu 9000 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006390 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006391
6392 /* update threshold */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006393 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006394 /* update init credit */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006395 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006396
6397 /* probe changes */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006398 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006399 msleep(5);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006400 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006401
Michael Chan37b091b2009-10-10 13:46:55 +00006402#ifdef BCM_CNIC
6403 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006404#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006405 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006406 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006407
6408 if (CHIP_IS_E1(bp)) {
6409 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6410 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6411 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006412 bnx2x_init_block(bp, HC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006413
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006414 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006415 /* init aeu_mask_attn_func_0/1:
6416 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6417 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6418 * bits 4-7 are used for "per vn group attention" */
6419 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6420 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6421
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006422 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006423 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006424 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006425 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006426 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006427
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006428 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006429
6430 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6431
6432 if (CHIP_IS_E1H(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006433 /* 0x2 disable e1hov, 0x1 enable */
6434 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6435 (IS_E1HMF(bp) ? 0x1 : 0x2));
6436
Eilon Greenstein1c063282009-02-12 08:36:43 +00006437 {
6438 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6439 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6440 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6441 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006442 }
6443
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006444 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07006445 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006446
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006447 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
Eilon Greenstein589abe32009-02-12 08:36:55 +00006448 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6449 {
6450 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6451
6452 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6453 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6454
6455 /* The GPIO should be swapped if the swap register is
6456 set and active */
6457 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6458 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6459
6460 /* Select function upon port-swap configuration */
6461 if (port == 0) {
6462 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6463 aeu_gpio_mask = (swap_val && swap_override) ?
6464 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6465 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6466 } else {
6467 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6468 aeu_gpio_mask = (swap_val && swap_override) ?
6469 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6470 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6471 }
6472 val = REG_RD(bp, offset);
6473 /* add GPIO3 to group */
6474 val |= aeu_gpio_mask;
6475 REG_WR(bp, offset, val);
6476 }
6477 break;
6478
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00006479 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006480 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eliezer Tamirf1410642008-02-28 11:51:50 -08006481 /* add SPIO 5 to group 0 */
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006482 {
6483 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6484 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6485 val = REG_RD(bp, reg_addr);
Eliezer Tamirf1410642008-02-28 11:51:50 -08006486 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
Eilon Greenstein4d295db2009-07-21 05:47:47 +00006487 REG_WR(bp, reg_addr, val);
6488 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08006489 break;
6490
6491 default:
6492 break;
6493 }
6494
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006495 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006496
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006497 return 0;
6498}
6499
/* ILT (Internal Lookup Table) layout: the 768 lines are split evenly
   between the two functions */
#define ILT_PER_FUNC	(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
/* pack a (first, last) ILT line pair into one PXP register value;
   PXP_ONE_ILT() is the degenerate single-line range */
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#ifdef BCM_CNIC
/* extra ILT lines reserved for the CNIC offload context */
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006518
6519static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6520{
6521 int reg;
6522
6523 if (CHIP_IS_E1H(bp))
6524 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6525 else /* E1 */
6526 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6527
6528 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6529}
6530
/*
 * Per-function HW initialization: enables MSI reconfigure in the HC,
 * programs this function's ILT lines (slowpath context, and with
 * BCM_CNIC also the timers/QM/searcher tables), runs the E1H-only
 * per-function init blocks, performs per-function HC init and clears
 * latched PCIE error status.  Always returns 0.
 */
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	/* first ILT line owned by this function */
	i = FUNC_ILT_BASE(func);

	/* map the slowpath context area through the CDU ILT range */
	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	/* timers table follows the CDU lines */
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	/* QM queues table */
	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	/* searcher (T1) table */
	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	/* last free entry: 64 bytes before the end of the 16K T2 area */
	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		/* per-function stage of the init blocks (E1H only) */
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		/* enable this function in the LLH and set its outer VLAN */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
6627
/*
 * Top-level HW init dispatcher, driven by the load_code returned by the
 * MCP: COMMON init runs once per chip, PORT init once per port,
 * FUNCTION init for every function.  The switch falls through on
 * purpose so a COMMON load also performs the PORT and FUNCTION stages,
 * and a PORT load also performs the FUNCTION stage.  Finishes by
 * seeding the driver-pulse sequence and zeroing the status blocks.
 * Returns 0 on success or a negative error from a failed stage.
 */
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	/* DMAE is unusable until the init stages below flip dmae_ready */
	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	/* scratch buffer used to decompress the FW init data */
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		/* seed the driver pulse sequence from the MCP mailbox */
		bp->fw_drv_pulse_wr_seq =
			       (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	/* one extra status block for the CNIC client; the loop above
	   leaves i one past the last ethernet queue */
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
6689
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006690static void bnx2x_free_mem(struct bnx2x *bp)
6691{
6692
6693#define BNX2X_PCI_FREE(x, y, size) \
6694 do { \
6695 if (x) { \
6696 pci_free_consistent(bp->pdev, size, x, y); \
6697 x = NULL; \
6698 y = 0; \
6699 } \
6700 } while (0)
6701
6702#define BNX2X_FREE(x) \
6703 do { \
6704 if (x) { \
6705 vfree(x); \
6706 x = NULL; \
6707 } \
6708 } while (0)
6709
6710 int i;
6711
6712 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006713 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006714 for_each_queue(bp, i) {
6715
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006716 /* status blocks */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006717 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6718 bnx2x_fp(bp, i, status_blk_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006719 sizeof(struct host_status_block));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006720 }
6721 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006722 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006723
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006724 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006725 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6726 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6727 bnx2x_fp(bp, i, rx_desc_mapping),
6728 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6729
6730 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6731 bnx2x_fp(bp, i, rx_comp_mapping),
6732 sizeof(struct eth_fast_path_rx_cqe) *
6733 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006734
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006735 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07006736 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006737 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6738 bnx2x_fp(bp, i, rx_sge_mapping),
6739 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6740 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006741 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006742 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006743
6744 /* fastpath tx rings: tx_buf tx_desc */
6745 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6746 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6747 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006748 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006749 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006750 /* end of fastpath */
6751
6752 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006753 sizeof(struct host_def_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006754
6755 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006756 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006757
Michael Chan37b091b2009-10-10 13:46:55 +00006758#ifdef BCM_CNIC
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006759 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6760 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6761 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6762 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
Michael Chan37b091b2009-10-10 13:46:55 +00006763 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6764 sizeof(struct host_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006765#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006766 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006767
6768#undef BNX2X_PCI_FREE
6769#undef BNX2X_KFREE
6770}
6771
/*
 * Allocate all host memory used by the driver: per-queue status blocks
 * and Rx/Tx rings, the default status block, the slowpath area, the
 * slowpath (SPQ) ring and, when BCM_CNIC is built in, the searcher,
 * timers and QM tables.  Every allocation is zero-initialized.  On any
 * failure the macros jump to alloc_mem_err, which unwinds everything
 * allocated so far through bnx2x_free_mem() and returns -ENOMEM.
 */
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

/* DMA-coherent allocation; jumps to alloc_mem_err on failure */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* virtually-contiguous allocation; same error path */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;	/* back-pointer to the parent */

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	/* chain each 64-byte entry to the physical address of the next */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6877
6878static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6879{
6880 int i;
6881
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006882 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006883 struct bnx2x_fastpath *fp = &bp->fp[i];
6884
6885 u16 bd_cons = fp->tx_bd_cons;
6886 u16 sw_prod = fp->tx_pkt_prod;
6887 u16 sw_cons = fp->tx_pkt_cons;
6888
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006889 while (sw_cons != sw_prod) {
6890 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6891 sw_cons++;
6892 }
6893 }
6894}
6895
/*
 * Unmap and free every Rx skb the driver still holds on each fastpath
 * queue, then release the TPA (aggregation) pool for queues where TPA
 * was not disabled.
 */
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			/* empty ring slot */
			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			/* E1 and E1H differ in the number of aggregation
			   queues per client */
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}
6923
/* Free every driver-owned skb: first the Tx rings, then the Rx rings. */
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
6929
/*
 * Release all MSI-X interrupt handlers: vector 0 is the slowpath,
 * vector 1 is reserved for CNIC when BCM_CNIC is built in, and the
 * fastpath queue vectors follow at [offset..].
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	/* slowpath vector */
	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	/* skip the CNIC vector - it is not owned by this driver */
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}
6949
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00006950static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006951{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006952 if (bp->flags & USING_MSIX_FLAG) {
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00006953 if (!disable_only)
6954 bnx2x_free_msix_irqs(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006955 pci_disable_msix(bp->pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006956 bp->flags &= ~USING_MSIX_FLAG;
6957
Eilon Greenstein8badd272009-02-12 08:36:15 +00006958 } else if (bp->flags & USING_MSI_FLAG) {
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00006959 if (!disable_only)
6960 free_irq(bp->pdev->irq, bp->dev);
Eilon Greenstein8badd272009-02-12 08:36:15 +00006961 pci_disable_msi(bp->pdev);
6962 bp->flags &= ~USING_MSI_FLAG;
6963
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00006964 } else if (!disable_only)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006965 free_irq(bp->pdev->irq, bp->dev);
6966}
6967
/*
 * Build the MSI-X vector table - slowpath first, then an optional CNIC
 * vector, then one vector per fastpath queue - and enable MSI-X on the
 * device.  On success sets USING_MSIX_FLAG and returns 0; on failure
 * returns the pci_enable_msix() error so the caller can fall back to
 * MSI or INTx.
 */
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	/* vector 0: slowpath */
	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	/* vector 1: CNIC status block */
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		/* fastpath IGU vectors follow the ones assigned above */
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006999}
7000
/*
 * Request an IRQ handler for every MSI-X vector set up by
 * bnx2x_enable_msix(): the slowpath handler on vector 0 and one
 * fastpath handler per queue (the CNIC vector, when present, is
 * skipped via the offset).  On any failure the vectors requested so
 * far are released and -EBUSY is returned.
 */
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		/* per-queue IRQ name, e.g. "eth0-fp-0" */
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			/* undo everything requested so far */
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
7039
Eilon Greenstein8badd272009-02-12 08:36:15 +00007040static int bnx2x_enable_msi(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007041{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007042 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007043
Eilon Greenstein8badd272009-02-12 08:36:15 +00007044 rc = pci_enable_msi(bp->pdev);
7045 if (rc) {
7046 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7047 return -1;
7048 }
7049 bp->flags |= USING_MSI_FLAG;
7050
7051 return 0;
7052}
7053
7054static int bnx2x_req_irq(struct bnx2x *bp)
7055{
7056 unsigned long flags;
7057 int rc;
7058
7059 if (bp->flags & USING_MSI_FLAG)
7060 flags = 0;
7061 else
7062 flags = IRQF_SHARED;
7063
7064 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007065 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007066 if (!rc)
7067 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7068
7069 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007070}
7071
Yitchak Gertner65abd742008-08-25 15:26:24 -07007072static void bnx2x_napi_enable(struct bnx2x *bp)
7073{
7074 int i;
7075
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007076 for_each_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007077 napi_enable(&bnx2x_fp(bp, i, napi));
7078}
7079
7080static void bnx2x_napi_disable(struct bnx2x *bp)
7081{
7082 int i;
7083
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007084 for_each_queue(bp, i)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007085 napi_disable(&bnx2x_fp(bp, i, napi));
7086}
7087
/*
 * Re-enable the interface once the interrupt semaphore drops to zero:
 * turns NAPI and HW interrupts back on and, when the device is fully
 * OPEN, wakes all Tx queues.  Does nothing while another path still
 * holds intr_sem or the netdev is not running.
 */
static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}
7104
/*
 * Quiesce the interface: mask and synchronize interrupts (optionally
 * also disabling them at the HW level when @disable_hw is set), then
 * stop NAPI polling and freeze the Tx queues.
 */
static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}
7111
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007112/*
7113 * Init service functions
7114 */
7115
Michael Chane665bfd2009-10-10 13:46:54 +00007116/**
7117 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7118 *
7119 * @param bp driver descriptor
7120 * @param set set or clear an entry (1 or 0)
7121 * @param mac pointer to a buffer containing a MAC
7122 * @param cl_bit_vec bit vector of clients to register a MAC for
7123 * @param cam_offset offset in a CAM to use
7124 * @param with_bcast set broadcast MAC as well
7125 */
7126static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7127 u32 cl_bit_vec, u8 cam_offset,
7128 u8 with_bcast)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007129{
7130 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007131 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007132
7133 /* CAM allocation
7134 * unicasts 0-31:port0 32-63:port1
7135 * multicast 64-127:port0 128-191:port1
7136 */
Michael Chane665bfd2009-10-10 13:46:54 +00007137 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7138 config->hdr.offset = cam_offset;
7139 config->hdr.client_id = 0xff;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007140 config->hdr.reserved1 = 0;
7141
7142 /* primary MAC */
7143 config->config_table[0].cam_entry.msb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00007144 swab16(*(u16 *)&mac[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007145 config->config_table[0].cam_entry.middle_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00007146 swab16(*(u16 *)&mac[2]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007147 config->config_table[0].cam_entry.lsb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00007148 swab16(*(u16 *)&mac[4]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007149 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007150 if (set)
7151 config->config_table[0].target_table_entry.flags = 0;
7152 else
7153 CAM_INVALIDATE(config->config_table[0]);
Eilon Greensteinca003922009-08-12 22:53:28 -07007154 config->config_table[0].target_table_entry.clients_bit_vector =
Michael Chane665bfd2009-10-10 13:46:54 +00007155 cpu_to_le32(cl_bit_vec);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007156 config->config_table[0].target_table_entry.vlan_id = 0;
7157
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007158 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7159 (set ? "setting" : "clearing"),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007160 config->config_table[0].cam_entry.msb_mac_addr,
7161 config->config_table[0].cam_entry.middle_mac_addr,
7162 config->config_table[0].cam_entry.lsb_mac_addr);
7163
7164 /* broadcast */
Michael Chane665bfd2009-10-10 13:46:54 +00007165 if (with_bcast) {
7166 config->config_table[1].cam_entry.msb_mac_addr =
7167 cpu_to_le16(0xffff);
7168 config->config_table[1].cam_entry.middle_mac_addr =
7169 cpu_to_le16(0xffff);
7170 config->config_table[1].cam_entry.lsb_mac_addr =
7171 cpu_to_le16(0xffff);
7172 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7173 if (set)
7174 config->config_table[1].target_table_entry.flags =
7175 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7176 else
7177 CAM_INVALIDATE(config->config_table[1]);
7178 config->config_table[1].target_table_entry.clients_bit_vector =
7179 cpu_to_le32(cl_bit_vec);
7180 config->config_table[1].target_table_entry.vlan_id = 0;
7181 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007182
7183 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7184 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7185 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7186}
7187
Michael Chane665bfd2009-10-10 13:46:54 +00007188/**
7189 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7190 *
7191 * @param bp driver descriptor
7192 * @param set set or clear an entry (1 or 0)
7193 * @param mac pointer to a buffer containing a MAC
7194 * @param cl_bit_vec bit vector of clients to register a MAC for
7195 * @param cam_offset offset in a CAM to use
7196 */
7197static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7198 u32 cl_bit_vec, u8 cam_offset)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007199{
7200 struct mac_configuration_cmd_e1h *config =
7201 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7202
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08007203 config->hdr.length = 1;
Michael Chane665bfd2009-10-10 13:46:54 +00007204 config->hdr.offset = cam_offset;
7205 config->hdr.client_id = 0xff;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007206 config->hdr.reserved1 = 0;
7207
7208 /* primary MAC */
7209 config->config_table[0].msb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00007210 swab16(*(u16 *)&mac[0]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007211 config->config_table[0].middle_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00007212 swab16(*(u16 *)&mac[2]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007213 config->config_table[0].lsb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00007214 swab16(*(u16 *)&mac[4]);
Eilon Greensteinca003922009-08-12 22:53:28 -07007215 config->config_table[0].clients_bit_vector =
Michael Chane665bfd2009-10-10 13:46:54 +00007216 cpu_to_le32(cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007217 config->config_table[0].vlan_id = 0;
7218 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007219 if (set)
7220 config->config_table[0].flags = BP_PORT(bp);
7221 else
7222 config->config_table[0].flags =
7223 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007224
Michael Chane665bfd2009-10-10 13:46:54 +00007225 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007226 (set ? "setting" : "clearing"),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007227 config->config_table[0].msb_mac_addr,
7228 config->config_table[0].middle_mac_addr,
Michael Chane665bfd2009-10-10 13:46:54 +00007229 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007230
7231 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7232 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7233 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7234}
7235
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007236static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7237 int *state_p, int poll)
7238{
7239 /* can take a while if any port is running */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00007240 int cnt = 5000;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007241
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007242 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7243 poll ? "polling" : "waiting", state, idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007244
7245 might_sleep();
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007246 while (cnt--) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007247 if (poll) {
7248 bnx2x_rx_int(bp->fp, 10);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007249 /* if index is different from 0
7250 * the reply for some commands will
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007251 * be on the non default queue
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007252 */
7253 if (idx)
7254 bnx2x_rx_int(&bp->fp[idx], 10);
7255 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007256
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07007257 mb(); /* state is changed by bnx2x_sp_event() */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00007258 if (*state_p == state) {
7259#ifdef BNX2X_STOP_ON_ERROR
7260 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7261#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007262 return 0;
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00007263 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007264
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007265 msleep(1);
Eilon Greensteine3553b22009-08-12 08:23:31 +00007266
7267 if (bp->panic)
7268 return -EIO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007269 }
7270
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007271 /* timeout! */
Eliezer Tamir49d66772008-02-28 11:53:13 -08007272 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7273 poll ? "polling" : "waiting", state, idx);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007274#ifdef BNX2X_STOP_ON_ERROR
7275 bnx2x_panic();
7276#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007277
Eliezer Tamir49d66772008-02-28 11:53:13 -08007278 return -EBUSY;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007279}
7280
Michael Chane665bfd2009-10-10 13:46:54 +00007281static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7282{
7283 bp->set_mac_pending++;
7284 smp_wmb();
7285
7286 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7287 (1 << bp->fp->cl_id), BP_FUNC(bp));
7288
7289 /* Wait for a completion */
7290 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7291}
7292
7293static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7294{
7295 bp->set_mac_pending++;
7296 smp_wmb();
7297
7298 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7299 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7300 1);
7301
7302 /* Wait for a completion */
7303 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7304}
7305
#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp	driver handle
 * @param set	set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
					  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif
7343
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007344static int bnx2x_setup_leading(struct bnx2x *bp)
7345{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007346 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007347
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007348 /* reset IGU state */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007349 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007350
7351 /* SETUP ramrod */
7352 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7353
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007354 /* Wait for completion */
7355 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007356
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007357 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007358}
7359
7360static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7361{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007362 struct bnx2x_fastpath *fp = &bp->fp[index];
7363
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007364 /* reset IGU state */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007365 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007366
Eliezer Tamir228241e2008-02-28 11:56:57 -08007367 /* SETUP ramrod */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007368 fp->state = BNX2X_FP_STATE_OPENING;
7369 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7370 fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007371
7372 /* Wait for completion */
7373 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007374 &(fp->state), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007375}
7376
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007377static int bnx2x_poll(struct napi_struct *napi, int budget);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007378
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007379static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007380{
Eilon Greensteinca003922009-08-12 22:53:28 -07007381
7382 switch (bp->multi_mode) {
7383 case ETH_RSS_MODE_DISABLED:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007384 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007385 break;
7386
7387 case ETH_RSS_MODE_REGULAR:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007388 if (num_queues)
7389 bp->num_queues = min_t(u32, num_queues,
7390 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07007391 else
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007392 bp->num_queues = min_t(u32, num_online_cpus(),
7393 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07007394 break;
7395
7396
7397 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007398 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007399 break;
7400 }
Eilon Greensteinca003922009-08-12 22:53:28 -07007401}
7402
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007403static int bnx2x_set_num_queues(struct bnx2x *bp)
Eilon Greensteinca003922009-08-12 22:53:28 -07007404{
7405 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007406
Eilon Greenstein8badd272009-02-12 08:36:15 +00007407 switch (int_mode) {
7408 case INT_MODE_INTx:
7409 case INT_MODE_MSI:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007410 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07007411 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
Eilon Greenstein8badd272009-02-12 08:36:15 +00007412 break;
7413
7414 case INT_MODE_MSIX:
7415 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007416 /* Set number of queues according to bp->multi_mode value */
7417 bnx2x_set_num_queues_msix(bp);
Eilon Greensteinca003922009-08-12 22:53:28 -07007418
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007419 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7420 bp->num_queues);
Eilon Greensteinca003922009-08-12 22:53:28 -07007421
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007422 /* if we can't use MSI-X we only need one fp,
7423 * so try to enable MSI-X with the requested number of fp's
7424 * and fallback to MSI or legacy INTx with one fp
7425 */
Eilon Greensteinca003922009-08-12 22:53:28 -07007426 rc = bnx2x_enable_msix(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007427 if (rc)
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007428 /* failed to enable MSI-X */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007429 bp->num_queues = 1;
Eilon Greenstein8badd272009-02-12 08:36:15 +00007430 break;
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007431 }
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007432 bp->dev->real_num_tx_queues = bp->num_queues;
Eilon Greensteinca003922009-08-12 22:53:28 -07007433 return rc;
Eilon Greenstein8badd272009-02-12 08:36:15 +00007434}
7435
Michael Chan993ac7b2009-10-10 13:46:56 +00007436#ifdef BCM_CNIC
7437static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7438static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7439#endif
Eilon Greenstein8badd272009-02-12 08:36:15 +00007440
7441/* must be called with rtnl_lock */
7442static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7443{
7444 u32 load_code;
Eilon Greensteinca003922009-08-12 22:53:28 -07007445 int i, rc;
7446
Eilon Greenstein8badd272009-02-12 08:36:15 +00007447#ifdef BNX2X_STOP_ON_ERROR
Eilon Greenstein8badd272009-02-12 08:36:15 +00007448 if (unlikely(bp->panic))
7449 return -EPERM;
7450#endif
7451
7452 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7453
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007454 rc = bnx2x_set_num_queues(bp);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007455
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007456 if (bnx2x_alloc_mem(bp)) {
7457 bnx2x_free_irq(bp, true);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007458 return -ENOMEM;
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007459 }
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007460
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007461 for_each_queue(bp, i)
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007462 bnx2x_fp(bp, i, disable_tpa) =
7463 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7464
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007465 for_each_queue(bp, i)
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007466 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7467 bnx2x_poll, 128);
7468
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007469 bnx2x_napi_enable(bp);
7470
7471 if (bp->flags & USING_MSIX_FLAG) {
7472 rc = bnx2x_req_msix_irqs(bp);
7473 if (rc) {
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007474 bnx2x_free_irq(bp, true);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007475 goto load_error1;
7476 }
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007477 } else {
Eilon Greensteinca003922009-08-12 22:53:28 -07007478 /* Fall to INTx if failed to enable MSI-X due to lack of
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007479 memory (in bnx2x_set_num_queues()) */
Eilon Greenstein8badd272009-02-12 08:36:15 +00007480 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7481 bnx2x_enable_msi(bp);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007482 bnx2x_ack_int(bp);
7483 rc = bnx2x_req_irq(bp);
7484 if (rc) {
7485 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007486 bnx2x_free_irq(bp, true);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007487 goto load_error1;
7488 }
Eilon Greenstein8badd272009-02-12 08:36:15 +00007489 if (bp->flags & USING_MSI_FLAG) {
7490 bp->dev->irq = bp->pdev->irq;
Joe Perches7995c642010-02-17 15:01:52 +00007491 netdev_info(bp->dev, "using MSI IRQ %d\n",
7492 bp->pdev->irq);
Eilon Greenstein8badd272009-02-12 08:36:15 +00007493 }
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007494 }
7495
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007496 /* Send LOAD_REQUEST command to MCP
7497 Returns the type of LOAD command:
7498 if it is the first port to be initialized
7499 common blocks should be initialized, otherwise - not
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007500 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007501 if (!BP_NOMCP(bp)) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007502 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7503 if (!load_code) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007504 BNX2X_ERR("MCP response failure, aborting\n");
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007505 rc = -EBUSY;
7506 goto load_error2;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007507 }
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007508 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7509 rc = -EBUSY; /* other port in diagnostic mode */
7510 goto load_error2;
7511 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007512
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007513 } else {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007514 int port = BP_PORT(bp);
7515
Eilon Greensteinf5372252009-02-12 08:38:30 +00007516 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007517 load_count[0], load_count[1], load_count[2]);
7518 load_count[0]++;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007519 load_count[1 + port]++;
Eilon Greensteinf5372252009-02-12 08:38:30 +00007520 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007521 load_count[0], load_count[1], load_count[2]);
7522 if (load_count[0] == 1)
7523 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007524 else if (load_count[1 + port] == 1)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007525 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7526 else
7527 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007528 }
7529
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007530 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7531 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7532 bp->port.pmf = 1;
7533 else
7534 bp->port.pmf = 0;
7535 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7536
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007537 /* Initialize HW */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007538 rc = bnx2x_init_hw(bp, load_code);
7539 if (rc) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007540 BNX2X_ERR("HW init failed, aborting\n");
Vladislav Zolotarovf1e1a192010-02-17 02:03:33 +00007541 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7542 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7543 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007544 goto load_error2;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007545 }
7546
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007547 /* Setup NIC internals and enable interrupts */
Eilon Greenstein471de712008-08-13 15:49:35 -07007548 bnx2x_nic_init(bp, load_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007549
Eilon Greenstein2691d512009-08-12 08:22:08 +00007550 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7551 (bp->common.shmem2_base))
7552 SHMEM2_WR(bp, dcc_support,
7553 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7554 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7555
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007556 /* Send LOAD_DONE command to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007557 if (!BP_NOMCP(bp)) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007558 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7559 if (!load_code) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007560 BNX2X_ERR("MCP response failure, aborting\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007561 rc = -EBUSY;
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007562 goto load_error3;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007563 }
7564 }
7565
7566 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7567
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007568 rc = bnx2x_setup_leading(bp);
7569 if (rc) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007570 BNX2X_ERR("Setup leading failed!\n");
Eilon Greensteine3553b22009-08-12 08:23:31 +00007571#ifndef BNX2X_STOP_ON_ERROR
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007572 goto load_error3;
Eilon Greensteine3553b22009-08-12 08:23:31 +00007573#else
7574 bp->panic = 1;
7575 return -EBUSY;
7576#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007577 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007578
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007579 if (CHIP_IS_E1H(bp))
7580 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
Eilon Greensteinf5372252009-02-12 08:38:30 +00007581 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07007582 bp->flags |= MF_FUNC_DIS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007583 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007584
Eilon Greensteinca003922009-08-12 22:53:28 -07007585 if (bp->state == BNX2X_STATE_OPEN) {
Michael Chan37b091b2009-10-10 13:46:55 +00007586#ifdef BCM_CNIC
7587 /* Enable Timer scan */
7588 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7589#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007590 for_each_nondefault_queue(bp, i) {
7591 rc = bnx2x_setup_multi(bp, i);
7592 if (rc)
Michael Chan37b091b2009-10-10 13:46:55 +00007593#ifdef BCM_CNIC
7594 goto load_error4;
7595#else
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007596 goto load_error3;
Michael Chan37b091b2009-10-10 13:46:55 +00007597#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007598 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007599
Eilon Greensteinca003922009-08-12 22:53:28 -07007600 if (CHIP_IS_E1(bp))
Michael Chane665bfd2009-10-10 13:46:54 +00007601 bnx2x_set_eth_mac_addr_e1(bp, 1);
Eilon Greensteinca003922009-08-12 22:53:28 -07007602 else
Michael Chane665bfd2009-10-10 13:46:54 +00007603 bnx2x_set_eth_mac_addr_e1h(bp, 1);
Michael Chan993ac7b2009-10-10 13:46:56 +00007604#ifdef BCM_CNIC
7605 /* Set iSCSI L2 MAC */
7606 mutex_lock(&bp->cnic_mutex);
7607 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7608 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7609 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
Michael Chan4a6e47a2009-12-25 17:13:07 -08007610 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
7611 CNIC_SB_ID(bp));
Michael Chan993ac7b2009-10-10 13:46:56 +00007612 }
7613 mutex_unlock(&bp->cnic_mutex);
7614#endif
Eilon Greensteinca003922009-08-12 22:53:28 -07007615 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007616
7617 if (bp->port.pmf)
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00007618 bnx2x_initial_phy_init(bp, load_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007619
7620 /* Start fast path */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007621 switch (load_mode) {
7622 case LOAD_NORMAL:
Eilon Greensteinca003922009-08-12 22:53:28 -07007623 if (bp->state == BNX2X_STATE_OPEN) {
7624 /* Tx queue should be only reenabled */
7625 netif_tx_wake_all_queues(bp->dev);
7626 }
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007627 /* Initialize the receive filter. */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007628 bnx2x_set_rx_mode(bp->dev);
7629 break;
7630
7631 case LOAD_OPEN:
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007632 netif_tx_start_all_queues(bp->dev);
Eilon Greensteinca003922009-08-12 22:53:28 -07007633 if (bp->state != BNX2X_STATE_OPEN)
7634 netif_tx_disable(bp->dev);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007635 /* Initialize the receive filter. */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007636 bnx2x_set_rx_mode(bp->dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007637 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007638
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007639 case LOAD_DIAG:
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007640 /* Initialize the receive filter. */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007641 bnx2x_set_rx_mode(bp->dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007642 bp->state = BNX2X_STATE_DIAG;
7643 break;
7644
7645 default:
7646 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007647 }
7648
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007649 if (!bp->port.pmf)
7650 bnx2x__link_status_update(bp);
7651
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007652 /* start the timer */
7653 mod_timer(&bp->timer, jiffies + bp->current_interval);
7654
Michael Chan993ac7b2009-10-10 13:46:56 +00007655#ifdef BCM_CNIC
7656 bnx2x_setup_cnic_irq_info(bp);
7657 if (bp->state == BNX2X_STATE_OPEN)
7658 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7659#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007660
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007661 return 0;
7662
Michael Chan37b091b2009-10-10 13:46:55 +00007663#ifdef BCM_CNIC
7664load_error4:
7665 /* Disable Timer scan */
7666 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7667#endif
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007668load_error3:
7669 bnx2x_int_disable_sync(bp, 1);
7670 if (!BP_NOMCP(bp)) {
7671 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7672 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7673 }
7674 bp->port.pmf = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07007675 /* Free SKBs, SGEs, TPA pool and driver internals */
7676 bnx2x_free_skbs(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007677 for_each_queue(bp, i)
Eilon Greenstein3196a882008-08-13 15:58:49 -07007678 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007679load_error2:
Yitchak Gertnerd1014632008-08-25 15:25:45 -07007680 /* Release IRQs */
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00007681 bnx2x_free_irq(bp, false);
Eilon Greenstein2dfe0e12009-01-22 03:37:44 +00007682load_error1:
7683 bnx2x_napi_disable(bp);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00007684 for_each_queue(bp, i)
Eilon Greenstein7cde1c82009-01-22 06:01:25 +00007685 netif_napi_del(&bnx2x_fp(bp, i, napi));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007686 bnx2x_free_mem(bp);
7687
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007688 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007689}
7690
7691static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7692{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007693 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007694 int rc;
7695
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007696 /* halt the connection */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007697 fp->state = BNX2X_FP_STATE_HALTING;
7698 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007699
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007700 /* Wait for completion */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007701 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007702 &(fp->state), 1);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007703 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007704 return rc;
7705
7706 /* delete cfc entry */
7707 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7708
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007709 /* Wait for completion */
7710 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007711 &(fp->state), 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007712 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007713}
7714
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007715static int bnx2x_stop_leading(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007716{
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00007717 __le16 dsb_sp_prod_idx;
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007718 /* if the other port is handling traffic,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007719 this can take a lot of time */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007720 int cnt = 500;
7721 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007722
7723 might_sleep();
7724
7725 /* Send HALT ramrod */
7726 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
Eilon Greenstein0626b892009-02-12 08:38:14 +00007727 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007728
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007729 /* Wait for completion */
7730 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7731 &(bp->fp[0].state), 1);
7732 if (rc) /* timeout */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007733 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007734
Eliezer Tamir49d66772008-02-28 11:53:13 -08007735 dsb_sp_prod_idx = *bp->dsb_sp_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007736
Eliezer Tamir228241e2008-02-28 11:56:57 -08007737 /* Send PORT_DELETE ramrod */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007738 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7739
Eliezer Tamir49d66772008-02-28 11:53:13 -08007740 /* Wait for completion to arrive on default status block
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007741 we are going to reset the chip anyway
7742 so there is not much to do if this times out
7743 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007744 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007745 if (!cnt) {
7746 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7747 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7748 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7749#ifdef BNX2X_STOP_ON_ERROR
7750 bnx2x_panic();
7751#endif
Eilon Greenstein36e552a2009-02-12 08:37:21 +00007752 rc = -EBUSY;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007753 break;
7754 }
7755 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007756 msleep(1);
Eilon Greenstein5650d9d2009-01-22 06:01:29 +00007757 rmb(); /* Refresh the dsb_sp_prod */
Eliezer Tamir49d66772008-02-28 11:53:13 -08007758 }
7759 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7760 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007761
7762 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007763}
7764
/* bnx2x_reset_func - quiesce the per-function HW blocks.
 * @bp: driver handle
 *
 * Masks this function's IGU attention edges, (when CNIC is built in)
 * stops the timers linear scan and waits for it to complete, then
 * invalidates all of the function's ILT entries.  Called on function
 * unload; assumes traffic has already been stopped.
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU: zero leading/trailing edge registers so the port
	 * raises no further attentions */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete (200 polls x 10ms)
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT: wipe every address-translation entry owned by this
	 * function */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
7793
/* bnx2x_reset_port - quiesce the per-port HW blocks.
 * @bp: driver handle
 *
 * Masks the port's NIG interrupt, blocks new Rx traffic from entering
 * the BRB, masks AEU attentions, then waits and verifies that the BRB
 * has drained.  A non-empty BRB is only logged, not treated as fatal,
 * since the chip is about to be reset anyway.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	/* Mask this port's NIG interrupt */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU: mask all attentions for this port */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* give in-flight packets time to drain out of the BRB */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
7819
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007820static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7821{
7822 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7823 BP_FUNC(bp), reset_code);
7824
7825 switch (reset_code) {
7826 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7827 bnx2x_reset_port(bp);
7828 bnx2x_reset_func(bp);
7829 bnx2x_reset_common(bp);
7830 break;
7831
7832 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7833 bnx2x_reset_port(bp);
7834 bnx2x_reset_func(bp);
7835 break;
7836
7837 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7838 bnx2x_reset_func(bp);
7839 break;
7840
7841 default:
7842 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7843 break;
7844 }
7845}
7846
/* must be called with rtnl_lock */
/* bnx2x_nic_unload - bring the NIC down and release all fast-path resources.
 * @bp:		 driver handle
 * @unload_mode: UNLOAD_NORMAL or a WoL-preserving mode; selects the
 *		 reset request code sent to the MCP
 *
 * Teardown order matters and is: stop Rx ("drop all"), stop NAPI/Tx and
 * IRQs, drain the Tx fast paths, invalidate MAC/multicast CAM entries,
 * close all connections via ramrods, negotiate the reset scope with the
 * MCP, reset the chip, then free SKBs, SGEs and driver memory.
 * Returns 0; ramrod timeouts are only fatal under BNX2X_STOP_ON_ERROR.
 */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

#ifdef BCM_CNIC
	/* tell the CNIC (iSCSI/FCoE offload) driver we are going down */
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	/* keep the MCP pulse alive manually now that the timer is gone */
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* poll up to ~1s per queue for pending Tx completions */
		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		/* E1: clear the unicast MAC and invalidate the whole
		 * multicast CAM table through a SET_MAC ramrod */
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		/* make the pending counter visible before posting */
		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		/* E1H: disable the LLH function and clear MAC + MC hash */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	/* choose the unload request code for the MCP based on WoL policy */
	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		/* NOTE: without STOP_ON_ERROR a failed leading-connection
		 * stop is not fatal; we still report unload to the MCP and
		 * reset the chip (the label directly follows) */
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		/* no MCP: emulate its bookkeeping with local load counters
		 * to decide how wide a reset is needed */
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	/* reset the link only when the whole port (or chip) goes down */
	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
8030
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008031static void bnx2x_reset_task(struct work_struct *work)
8032{
8033 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8034
8035#ifdef BNX2X_STOP_ON_ERROR
8036 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8037 " so reset not done to allow debug dump,\n"
Joe Perchesad361c92009-07-06 13:05:40 -07008038 " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008039 return;
8040#endif
8041
8042 rtnl_lock();
8043
8044 if (!netif_running(bp->dev))
8045 goto reset_task_exit;
8046
8047 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8048 bnx2x_nic_load(bp, LOAD_NORMAL);
8049
8050reset_task_exit:
8051 rtnl_unlock();
8052}
8053
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008054/* end of nic load/unload */
8055
8056/* ethtool_ops */
8057
8058/*
8059 * Init service functions
8060 */
8061
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00008062static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8063{
8064 switch (func) {
8065 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8066 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8067 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8068 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8069 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8070 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8071 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8072 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8073 default:
8074 BNX2X_ERR("Unsupported function index: %d\n", func);
8075 return (u32)(-1);
8076 }
8077}
8078
/* bnx2x_undi_int_disable_e1h - disable interrupts via the function-0 path.
 * @bp:        driver handle
 * @orig_func: the function we are really running as (restored at the end)
 *
 * On E1H the interrupt-disable registers belong to function 0, so we
 * temporarily program the GRC "pretend" register to masquerade as
 * function 0, disable interrupts, and then restore our identity.  Each
 * pretend write is read back and BUG()s on mismatch because continuing
 * with the wrong identity would corrupt another function's state.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
8111
/* bnx2x_undi_int_disable - chip-aware interrupt disable for UNDI teardown.
 * @bp:   driver handle
 * @func: function index, forwarded to the E1H pretend sequence
 *
 * E1H must masquerade as function 0 to touch the interrupt registers;
 * E1 can simply disable them directly.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp))
		bnx2x_int_disable(bp);
	else
		bnx2x_undi_int_disable_e1h(bp, func);
}
8119
/* bnx2x_undi_unload - evict a resident UNDI (pre-boot) driver from the chip.
 * @bp: driver handle
 *
 * If the chip is marked unprepared and the doorbell CID offset carries
 * the UNDI signature (0x7), a pre-OS UNDI driver is still loaded.  This
 * sends unload requests for it (both ports if needed), disables its
 * interrupts, blocks Rx traffic, resets the device while preserving the
 * NIG port-swap straps, and finally restores our own function id and
 * firmware sequence number.  Runs at probe time, under the UNDI HW lock.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
			       NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
				NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
8218
/* bnx2x_get_common_hwinfo - read chip-wide identification and shmem info.
 * @bp: driver handle
 *
 * Probe-time helper: assembles the chip id from the NUM/REV/METAL/BOND
 * registers, detects single-port devices, reads the flash size and the
 * shared-memory base addresses, validates the MCP, and caches the
 * bootcode version, LED mode, feature flags and WoL capability.
 * Returns early (with NO_MCP_FLAG set) when the MCP is not active.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* detect single-port parts; 0x2874 is an undocumented strap
	 * register -- the 0x55 pattern meaning differs between E1/E1H */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	/* a shmem base outside [0xA0000, 0xC0000) means no running MCP */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		/* NOTE(review): flags were just zeroed above, so this
		 * clear is redundant but harmless */
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		/* WoL is possible only if the PCI PM capability reports
		 * PME support from D3cold */
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
}
8316
8317static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8318 u32 switch_cfg)
8319{
8320 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008321 u32 ext_phy_type;
8322
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008323 switch (switch_cfg) {
8324 case SWITCH_CFG_1G:
8325 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8326
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008327 ext_phy_type =
8328 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008329 switch (ext_phy_type) {
8330 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8331 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8332 ext_phy_type);
8333
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008334 bp->port.supported |= (SUPPORTED_10baseT_Half |
8335 SUPPORTED_10baseT_Full |
8336 SUPPORTED_100baseT_Half |
8337 SUPPORTED_100baseT_Full |
8338 SUPPORTED_1000baseT_Full |
8339 SUPPORTED_2500baseX_Full |
8340 SUPPORTED_TP |
8341 SUPPORTED_FIBRE |
8342 SUPPORTED_Autoneg |
8343 SUPPORTED_Pause |
8344 SUPPORTED_Asym_Pause);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008345 break;
8346
8347 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8348 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8349 ext_phy_type);
8350
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008351 bp->port.supported |= (SUPPORTED_10baseT_Half |
8352 SUPPORTED_10baseT_Full |
8353 SUPPORTED_100baseT_Half |
8354 SUPPORTED_100baseT_Full |
8355 SUPPORTED_1000baseT_Full |
8356 SUPPORTED_TP |
8357 SUPPORTED_FIBRE |
8358 SUPPORTED_Autoneg |
8359 SUPPORTED_Pause |
8360 SUPPORTED_Asym_Pause);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008361 break;
8362
8363 default:
8364 BNX2X_ERR("NVRAM config error. "
8365 "BAD SerDes ext_phy_config 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008366 bp->link_params.ext_phy_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008367 return;
8368 }
8369
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008370 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8371 port*0x10);
8372 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008373 break;
8374
8375 case SWITCH_CFG_10G:
8376 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8377
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008378 ext_phy_type =
8379 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008380 switch (ext_phy_type) {
8381 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8382 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8383 ext_phy_type);
8384
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008385 bp->port.supported |= (SUPPORTED_10baseT_Half |
8386 SUPPORTED_10baseT_Full |
8387 SUPPORTED_100baseT_Half |
8388 SUPPORTED_100baseT_Full |
8389 SUPPORTED_1000baseT_Full |
8390 SUPPORTED_2500baseX_Full |
8391 SUPPORTED_10000baseT_Full |
8392 SUPPORTED_TP |
8393 SUPPORTED_FIBRE |
8394 SUPPORTED_Autoneg |
8395 SUPPORTED_Pause |
8396 SUPPORTED_Asym_Pause);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008397 break;
8398
Eliezer Tamirf1410642008-02-28 11:51:50 -08008399 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8400 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8401 ext_phy_type);
8402
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008403 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8404 SUPPORTED_1000baseT_Full |
8405 SUPPORTED_FIBRE |
8406 SUPPORTED_Autoneg |
8407 SUPPORTED_Pause |
8408 SUPPORTED_Asym_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08008409 break;
8410
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008411 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8412 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8413 ext_phy_type);
8414
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008415 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8416 SUPPORTED_2500baseX_Full |
8417 SUPPORTED_1000baseT_Full |
8418 SUPPORTED_FIBRE |
8419 SUPPORTED_Autoneg |
8420 SUPPORTED_Pause |
8421 SUPPORTED_Asym_Pause);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008422 break;
8423
Eilon Greenstein589abe32009-02-12 08:36:55 +00008424 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8425 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8426 ext_phy_type);
8427
8428 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8429 SUPPORTED_FIBRE |
8430 SUPPORTED_Pause |
8431 SUPPORTED_Asym_Pause);
8432 break;
8433
8434 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8435 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8436 ext_phy_type);
8437
8438 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8439 SUPPORTED_1000baseT_Full |
8440 SUPPORTED_FIBRE |
8441 SUPPORTED_Pause |
8442 SUPPORTED_Asym_Pause);
8443 break;
8444
8445 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8446 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8447 ext_phy_type);
8448
8449 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8450 SUPPORTED_1000baseT_Full |
8451 SUPPORTED_Autoneg |
8452 SUPPORTED_FIBRE |
8453 SUPPORTED_Pause |
8454 SUPPORTED_Asym_Pause);
8455 break;
8456
Eilon Greenstein4d295db2009-07-21 05:47:47 +00008457 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8458 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8459 ext_phy_type);
8460
8461 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8462 SUPPORTED_1000baseT_Full |
8463 SUPPORTED_Autoneg |
8464 SUPPORTED_FIBRE |
8465 SUPPORTED_Pause |
8466 SUPPORTED_Asym_Pause);
8467 break;
8468
Eliezer Tamirf1410642008-02-28 11:51:50 -08008469 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8470 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8471 ext_phy_type);
8472
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008473 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8474 SUPPORTED_TP |
8475 SUPPORTED_Autoneg |
8476 SUPPORTED_Pause |
8477 SUPPORTED_Asym_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08008478 break;
8479
Eilon Greenstein28577182009-02-12 08:37:00 +00008480 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8481 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8482 ext_phy_type);
8483
8484 bp->port.supported |= (SUPPORTED_10baseT_Half |
8485 SUPPORTED_10baseT_Full |
8486 SUPPORTED_100baseT_Half |
8487 SUPPORTED_100baseT_Full |
8488 SUPPORTED_1000baseT_Full |
8489 SUPPORTED_10000baseT_Full |
8490 SUPPORTED_TP |
8491 SUPPORTED_Autoneg |
8492 SUPPORTED_Pause |
8493 SUPPORTED_Asym_Pause);
8494 break;
8495
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008496 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8497 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8498 bp->link_params.ext_phy_config);
8499 break;
8500
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008501 default:
8502 BNX2X_ERR("NVRAM config error. "
8503 "BAD XGXS ext_phy_config 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008504 bp->link_params.ext_phy_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008505 return;
8506 }
8507
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008508 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8509 port*0x18);
8510 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008511
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008512 break;
8513
8514 default:
8515 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008516 bp->port.link_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008517 return;
8518 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008519 bp->link_params.phy_addr = bp->port.phy_addr;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008520
8521 /* mask what we support according to speed_cap_mask */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008522 if (!(bp->link_params.speed_cap_mask &
8523 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008524 bp->port.supported &= ~SUPPORTED_10baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008525
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008526 if (!(bp->link_params.speed_cap_mask &
8527 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008528 bp->port.supported &= ~SUPPORTED_10baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008529
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008530 if (!(bp->link_params.speed_cap_mask &
8531 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008532 bp->port.supported &= ~SUPPORTED_100baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008533
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008534 if (!(bp->link_params.speed_cap_mask &
8535 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008536 bp->port.supported &= ~SUPPORTED_100baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008537
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008538 if (!(bp->link_params.speed_cap_mask &
8539 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008540 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8541 SUPPORTED_1000baseT_Full);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008542
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008543 if (!(bp->link_params.speed_cap_mask &
8544 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008545 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008546
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008547 if (!(bp->link_params.speed_cap_mask &
8548 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008549 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008550
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008551 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008552}
8553
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008554static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008555{
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008556 bp->link_params.req_duplex = DUPLEX_FULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008557
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008558 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008559 case PORT_FEATURE_LINK_SPEED_AUTO:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008560 if (bp->port.supported & SUPPORTED_Autoneg) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008561 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008562 bp->port.advertising = bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008563 } else {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008564 u32 ext_phy_type =
8565 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8566
8567 if ((ext_phy_type ==
8568 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8569 (ext_phy_type ==
8570 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008571 /* force 10G, no AN */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008572 bp->link_params.req_line_speed = SPEED_10000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008573 bp->port.advertising =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008574 (ADVERTISED_10000baseT_Full |
8575 ADVERTISED_FIBRE);
8576 break;
8577 }
8578 BNX2X_ERR("NVRAM config error. "
8579 "Invalid link_config 0x%x"
8580 " Autoneg not supported\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008581 bp->port.link_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008582 return;
8583 }
8584 break;
8585
8586 case PORT_FEATURE_LINK_SPEED_10M_FULL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008587 if (bp->port.supported & SUPPORTED_10baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008588 bp->link_params.req_line_speed = SPEED_10;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008589 bp->port.advertising = (ADVERTISED_10baseT_Full |
8590 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008591 } else {
8592 BNX2X_ERR("NVRAM config error. "
8593 "Invalid link_config 0x%x"
8594 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008595 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008596 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008597 return;
8598 }
8599 break;
8600
8601 case PORT_FEATURE_LINK_SPEED_10M_HALF:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008602 if (bp->port.supported & SUPPORTED_10baseT_Half) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008603 bp->link_params.req_line_speed = SPEED_10;
8604 bp->link_params.req_duplex = DUPLEX_HALF;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008605 bp->port.advertising = (ADVERTISED_10baseT_Half |
8606 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008607 } else {
8608 BNX2X_ERR("NVRAM config error. "
8609 "Invalid link_config 0x%x"
8610 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008611 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008612 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008613 return;
8614 }
8615 break;
8616
8617 case PORT_FEATURE_LINK_SPEED_100M_FULL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008618 if (bp->port.supported & SUPPORTED_100baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008619 bp->link_params.req_line_speed = SPEED_100;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008620 bp->port.advertising = (ADVERTISED_100baseT_Full |
8621 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008622 } else {
8623 BNX2X_ERR("NVRAM config error. "
8624 "Invalid link_config 0x%x"
8625 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008626 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008627 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008628 return;
8629 }
8630 break;
8631
8632 case PORT_FEATURE_LINK_SPEED_100M_HALF:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008633 if (bp->port.supported & SUPPORTED_100baseT_Half) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008634 bp->link_params.req_line_speed = SPEED_100;
8635 bp->link_params.req_duplex = DUPLEX_HALF;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008636 bp->port.advertising = (ADVERTISED_100baseT_Half |
8637 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008638 } else {
8639 BNX2X_ERR("NVRAM config error. "
8640 "Invalid link_config 0x%x"
8641 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008642 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008643 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008644 return;
8645 }
8646 break;
8647
8648 case PORT_FEATURE_LINK_SPEED_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008649 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008650 bp->link_params.req_line_speed = SPEED_1000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008651 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8652 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008653 } else {
8654 BNX2X_ERR("NVRAM config error. "
8655 "Invalid link_config 0x%x"
8656 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008657 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008658 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008659 return;
8660 }
8661 break;
8662
8663 case PORT_FEATURE_LINK_SPEED_2_5G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008664 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008665 bp->link_params.req_line_speed = SPEED_2500;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008666 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8667 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008668 } else {
8669 BNX2X_ERR("NVRAM config error. "
8670 "Invalid link_config 0x%x"
8671 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008672 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008673 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008674 return;
8675 }
8676 break;
8677
8678 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8679 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8680 case PORT_FEATURE_LINK_SPEED_10G_KR:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008681 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008682 bp->link_params.req_line_speed = SPEED_10000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008683 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8684 ADVERTISED_FIBRE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008685 } else {
8686 BNX2X_ERR("NVRAM config error. "
8687 "Invalid link_config 0x%x"
8688 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008689 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008690 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008691 return;
8692 }
8693 break;
8694
8695 default:
8696 BNX2X_ERR("NVRAM config error. "
8697 "BAD link speed link_config 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008698 bp->port.link_config);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008699 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008700 bp->port.advertising = bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008701 break;
8702 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008703
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008704 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8705 PORT_FEATURE_FLOW_CONTROL_MASK);
David S. Millerc0700f92008-12-16 23:53:20 -08008706 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
Randy Dunlap4ab84d42008-08-07 20:33:19 -07008707 !(bp->port.supported & SUPPORTED_Autoneg))
David S. Millerc0700f92008-12-16 23:53:20 -08008708 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008709
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008710 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
Eliezer Tamirf1410642008-02-28 11:51:50 -08008711 " advertising 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008712 bp->link_params.req_line_speed,
8713 bp->link_params.req_duplex,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008714 bp->link_params.req_flow_ctrl, bp->port.advertising);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008715}
8716
Michael Chane665bfd2009-10-10 13:46:54 +00008717static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8718{
8719 mac_hi = cpu_to_be16(mac_hi);
8720 mac_lo = cpu_to_be32(mac_lo);
8721 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8722 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8723}
8724
/* Read the per-port hardware configuration out of shared memory:
 * lane config, external PHY type, speed capability mask, link_config,
 * XGXS equalization values, WoL default and the port MAC address.
 * Then derive the supported and requested link settings from it.
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	/* link_params carries a back-pointer and the port index for the
	 * link code that runs off this structure */
	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		/* normalize the NOC variant to plain 8727 and remember the
		 * difference in a feature flag instead */
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		/* each 32-bit word packs two 16-bit per-lane values:
		 * high half = lane 2*i, low half = lane 2*i+1 */
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	/* NOTE(review): |= keeps any switch_cfg bits set earlier rather
	 * than overwriting them — presumably intentional; confirm */
	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	/* primary MAC: upper 2 bytes in mac_upper, lower 4 in mac_lower */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	/* separate MAC used for iSCSI offload */
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008816
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008817static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8818{
8819 int func = BP_FUNC(bp);
8820 u32 val, val2;
8821 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008822
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008823 bnx2x_get_common_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008824
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008825 bp->e1hov = 0;
8826 bp->e1hmf = 0;
8827 if (CHIP_IS_E1H(bp)) {
8828 bp->mf_config =
8829 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008830
Eilon Greenstein2691d512009-08-12 08:22:08 +00008831 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
Eilon Greenstein3196a882008-08-13 15:58:49 -07008832 FUNC_MF_CFG_E1HOV_TAG_MASK);
Eilon Greenstein2691d512009-08-12 08:22:08 +00008833 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008834 bp->e1hmf = 1;
Eilon Greenstein2691d512009-08-12 08:22:08 +00008835 BNX2X_DEV_INFO("%s function mode\n",
8836 IS_E1HMF(bp) ? "multi" : "single");
8837
8838 if (IS_E1HMF(bp)) {
8839 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8840 e1hov_tag) &
8841 FUNC_MF_CFG_E1HOV_TAG_MASK);
8842 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8843 bp->e1hov = val;
8844 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8845 "(0x%04x)\n",
8846 func, bp->e1hov, bp->e1hov);
8847 } else {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008848 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8849 " aborting\n", func);
8850 rc = -EPERM;
8851 }
Eilon Greenstein2691d512009-08-12 08:22:08 +00008852 } else {
8853 if (BP_E1HVN(bp)) {
8854 BNX2X_ERR("!!! VN %d in single function mode,"
8855 " aborting\n", BP_E1HVN(bp));
8856 rc = -EPERM;
8857 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008858 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008859 }
8860
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008861 if (!BP_NOMCP(bp)) {
8862 bnx2x_get_port_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008863
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008864 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8865 DRV_MSG_SEQ_NUMBER_MASK);
8866 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8867 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008868
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008869 if (IS_E1HMF(bp)) {
8870 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8871 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8872 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8873 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8874 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8875 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8876 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8877 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8878 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8879 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8880 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8881 ETH_ALEN);
8882 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8883 ETH_ALEN);
8884 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008885
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008886 return rc;
8887 }
8888
8889 if (BP_NOMCP(bp)) {
8890 /* only supposed to happen on emulation/FPGA */
Eilon Greenstein33471622008-08-13 15:59:08 -07008891 BNX2X_ERR("warning random MAC workaround active\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008892 random_ether_addr(bp->dev->dev_addr);
8893 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8894 }
8895
8896 return rc;
8897}
8898
/* One-time driver-private initialization: interrupt gating, locks, work
 * items, hardware info, module-parameter-driven defaults and the
 * periodic timer.  Returns the result of bnx2x_get_hwinfo().
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		pr_err("FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		pr_err("MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		/* RSS multi-queue requires MSI-X interrupts */
		pr_err("Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	/* dropless flow control is not supported on E1 */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	/* RX checksum offload enabled by default */
	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	/* slow (emulation/FPGA) chips get a 5x longer timer period;
	 * the "poll" module parameter overrides either value */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
8974
8975/*
8976 * ethtool service functions
8977 */
8978
8979/* All ethtool functions called with rtnl_lock */
8980
8981static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8982{
8983 struct bnx2x *bp = netdev_priv(dev);
8984
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008985 cmd->supported = bp->port.supported;
8986 cmd->advertising = bp->port.advertising;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008987
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07008988 if ((bp->state == BNX2X_STATE_OPEN) &&
8989 !(bp->flags & MF_FUNC_DIS) &&
8990 (bp->link_vars.link_up)) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008991 cmd->speed = bp->link_vars.line_speed;
8992 cmd->duplex = bp->link_vars.duplex;
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07008993 if (IS_E1HMF(bp)) {
8994 u16 vn_max_rate;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008995
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07008996 vn_max_rate =
8997 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008998 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07008999 if (vn_max_rate < cmd->speed)
9000 cmd->speed = vn_max_rate;
9001 }
9002 } else {
9003 cmd->speed = -1;
9004 cmd->duplex = -1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009005 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009006
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009007 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9008 u32 ext_phy_type =
9009 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
Eliezer Tamirf1410642008-02-28 11:51:50 -08009010
9011 switch (ext_phy_type) {
9012 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009013 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009014 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
Eilon Greenstein589abe32009-02-12 08:36:55 +00009015 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9016 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9017 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
Eilon Greenstein4d295db2009-07-21 05:47:47 +00009018 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009019 cmd->port = PORT_FIBRE;
9020 break;
9021
9022 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
Eilon Greenstein28577182009-02-12 08:37:00 +00009023 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009024 cmd->port = PORT_TP;
9025 break;
9026
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009027 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9028 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9029 bp->link_params.ext_phy_config);
9030 break;
9031
Eliezer Tamirf1410642008-02-28 11:51:50 -08009032 default:
9033 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009034 bp->link_params.ext_phy_config);
9035 break;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009036 }
9037 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009038 cmd->port = PORT_TP;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009039
Eilon Greenstein01cd4522009-08-12 08:23:08 +00009040 cmd->phy_address = bp->mdio.prtad;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009041 cmd->transceiver = XCVR_INTERNAL;
9042
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009043 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009044 cmd->autoneg = AUTONEG_ENABLE;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009045 else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009046 cmd->autoneg = AUTONEG_DISABLE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009047
9048 cmd->maxtxpkt = 0;
9049 cmd->maxrxpkt = 0;
9050
9051 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9052 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9053 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9054 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9055 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9056 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9057 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9058
9059 return 0;
9060}
9061
9062static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9063{
9064 struct bnx2x *bp = netdev_priv(dev);
9065 u32 advertising;
9066
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009067 if (IS_E1HMF(bp))
9068 return 0;
9069
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009070 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9071 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9072 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9073 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9074 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9075 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9076 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9077
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009078 if (cmd->autoneg == AUTONEG_ENABLE) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009079 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9080 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009081 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009082 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009083
9084 /* advertise the requested speed and duplex if supported */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009085 cmd->advertising &= bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009086
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009087 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9088 bp->link_params.req_duplex = DUPLEX_FULL;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009089 bp->port.advertising |= (ADVERTISED_Autoneg |
9090 cmd->advertising);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009091
9092 } else { /* forced speed */
9093 /* advertise the requested speed and duplex if supported */
9094 switch (cmd->speed) {
9095 case SPEED_10:
9096 if (cmd->duplex == DUPLEX_FULL) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009097 if (!(bp->port.supported &
Eliezer Tamirf1410642008-02-28 11:51:50 -08009098 SUPPORTED_10baseT_Full)) {
9099 DP(NETIF_MSG_LINK,
9100 "10M full not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009101 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009102 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009103
9104 advertising = (ADVERTISED_10baseT_Full |
9105 ADVERTISED_TP);
9106 } else {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009107 if (!(bp->port.supported &
Eliezer Tamirf1410642008-02-28 11:51:50 -08009108 SUPPORTED_10baseT_Half)) {
9109 DP(NETIF_MSG_LINK,
9110 "10M half not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009111 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009112 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009113
9114 advertising = (ADVERTISED_10baseT_Half |
9115 ADVERTISED_TP);
9116 }
9117 break;
9118
9119 case SPEED_100:
9120 if (cmd->duplex == DUPLEX_FULL) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009121 if (!(bp->port.supported &
Eliezer Tamirf1410642008-02-28 11:51:50 -08009122 SUPPORTED_100baseT_Full)) {
9123 DP(NETIF_MSG_LINK,
9124 "100M full not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009125 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009126 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009127
9128 advertising = (ADVERTISED_100baseT_Full |
9129 ADVERTISED_TP);
9130 } else {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009131 if (!(bp->port.supported &
Eliezer Tamirf1410642008-02-28 11:51:50 -08009132 SUPPORTED_100baseT_Half)) {
9133 DP(NETIF_MSG_LINK,
9134 "100M half not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009135 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009136 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009137
9138 advertising = (ADVERTISED_100baseT_Half |
9139 ADVERTISED_TP);
9140 }
9141 break;
9142
9143 case SPEED_1000:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009144 if (cmd->duplex != DUPLEX_FULL) {
9145 DP(NETIF_MSG_LINK, "1G half not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009146 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009147 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009148
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009149 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08009150 DP(NETIF_MSG_LINK, "1G full not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009151 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009152 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009153
9154 advertising = (ADVERTISED_1000baseT_Full |
9155 ADVERTISED_TP);
9156 break;
9157
9158 case SPEED_2500:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009159 if (cmd->duplex != DUPLEX_FULL) {
9160 DP(NETIF_MSG_LINK,
9161 "2.5G half not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009162 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009163 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009164
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009165 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08009166 DP(NETIF_MSG_LINK,
9167 "2.5G full not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009168 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009169 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009170
Eliezer Tamirf1410642008-02-28 11:51:50 -08009171 advertising = (ADVERTISED_2500baseX_Full |
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009172 ADVERTISED_TP);
9173 break;
9174
9175 case SPEED_10000:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009176 if (cmd->duplex != DUPLEX_FULL) {
9177 DP(NETIF_MSG_LINK, "10G half not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009178 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009179 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009180
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009181 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08009182 DP(NETIF_MSG_LINK, "10G full not supported\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009183 return -EINVAL;
Eliezer Tamirf1410642008-02-28 11:51:50 -08009184 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009185
9186 advertising = (ADVERTISED_10000baseT_Full |
9187 ADVERTISED_FIBRE);
9188 break;
9189
9190 default:
Eliezer Tamirf1410642008-02-28 11:51:50 -08009191 DP(NETIF_MSG_LINK, "Unsupported speed\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009192 return -EINVAL;
9193 }
9194
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009195 bp->link_params.req_line_speed = cmd->speed;
9196 bp->link_params.req_duplex = cmd->duplex;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009197 bp->port.advertising = advertising;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009198 }
9199
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009200 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009201 DP_LEVEL " req_duplex %d advertising 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009202 bp->link_params.req_line_speed, bp->link_params.req_duplex,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009203 bp->port.advertising);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009204
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009205 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009206 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009207 bnx2x_link_set(bp);
9208 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009209
9210 return 0;
9211}
9212
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00009213#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9214#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9215
9216static int bnx2x_get_regs_len(struct net_device *dev)
9217{
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00009218 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein0d28e492009-08-12 08:23:40 +00009219 int regdump_len = 0;
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00009220 int i;
9221
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00009222 if (CHIP_IS_E1(bp)) {
9223 for (i = 0; i < REGS_COUNT; i++)
9224 if (IS_E1_ONLINE(reg_addrs[i].info))
9225 regdump_len += reg_addrs[i].size;
9226
9227 for (i = 0; i < WREGS_COUNT_E1; i++)
9228 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9229 regdump_len += wreg_addrs_e1[i].size *
9230 (1 + wreg_addrs_e1[i].read_regs_count);
9231
9232 } else { /* E1H */
9233 for (i = 0; i < REGS_COUNT; i++)
9234 if (IS_E1H_ONLINE(reg_addrs[i].info))
9235 regdump_len += reg_addrs[i].size;
9236
9237 for (i = 0; i < WREGS_COUNT_E1H; i++)
9238 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9239 regdump_len += wreg_addrs_e1h[i].size *
9240 (1 + wreg_addrs_e1h[i].read_regs_count);
9241 }
9242 regdump_len *= 4;
9243 regdump_len += sizeof(struct dump_hdr);
9244
9245 return regdump_len;
9246}
9247
9248static void bnx2x_get_regs(struct net_device *dev,
9249 struct ethtool_regs *regs, void *_p)
9250{
9251 u32 *p = _p, i, j;
9252 struct bnx2x *bp = netdev_priv(dev);
9253 struct dump_hdr dump_hdr = {0};
9254
9255 regs->version = 0;
9256 memset(p, 0, regs->len);
9257
9258 if (!netif_running(bp->dev))
9259 return;
9260
9261 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9262 dump_hdr.dump_sign = dump_sign_all;
9263 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9264 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9265 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9266 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9267 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9268
9269 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9270 p += dump_hdr.hdr_size + 1;
9271
9272 if (CHIP_IS_E1(bp)) {
9273 for (i = 0; i < REGS_COUNT; i++)
9274 if (IS_E1_ONLINE(reg_addrs[i].info))
9275 for (j = 0; j < reg_addrs[i].size; j++)
9276 *p++ = REG_RD(bp,
9277 reg_addrs[i].addr + j*4);
9278
9279 } else { /* E1H */
9280 for (i = 0; i < REGS_COUNT; i++)
9281 if (IS_E1H_ONLINE(reg_addrs[i].info))
9282 for (j = 0; j < reg_addrs[i].size; j++)
9283 *p++ = REG_RD(bp,
9284 reg_addrs[i].addr + j*4);
9285 }
9286}
9287
Eilon Greenstein0d28e492009-08-12 08:23:40 +00009288#define PHY_FW_VER_LEN 10
9289
9290static void bnx2x_get_drvinfo(struct net_device *dev,
9291 struct ethtool_drvinfo *info)
9292{
9293 struct bnx2x *bp = netdev_priv(dev);
9294 u8 phy_fw_ver[PHY_FW_VER_LEN];
9295
9296 strcpy(info->driver, DRV_MODULE_NAME);
9297 strcpy(info->version, DRV_MODULE_VERSION);
9298
9299 phy_fw_ver[0] = '\0';
9300 if (bp->port.pmf) {
9301 bnx2x_acquire_phy_lock(bp);
9302 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9303 (bp->state != BNX2X_STATE_CLOSED),
9304 phy_fw_ver, PHY_FW_VER_LEN);
9305 bnx2x_release_phy_lock(bp);
9306 }
9307
9308 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9309 (bp->common.bc_ver & 0xff0000) >> 16,
9310 (bp->common.bc_ver & 0xff00) >> 8,
9311 (bp->common.bc_ver & 0xff),
9312 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9313 strcpy(info->bus_info, pci_name(bp->pdev));
9314 info->n_stats = BNX2X_NUM_STATS;
9315 info->testinfo_len = BNX2X_NUM_TESTS;
9316 info->eedump_len = bp->common.flash_size;
9317 info->regdump_len = bnx2x_get_regs_len(dev);
9318}
9319
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009320static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9321{
9322 struct bnx2x *bp = netdev_priv(dev);
9323
9324 if (bp->flags & NO_WOL_FLAG) {
9325 wol->supported = 0;
9326 wol->wolopts = 0;
9327 } else {
9328 wol->supported = WAKE_MAGIC;
9329 if (bp->wol)
9330 wol->wolopts = WAKE_MAGIC;
9331 else
9332 wol->wolopts = 0;
9333 }
9334 memset(&wol->sopass, 0, sizeof(wol->sopass));
9335}
9336
9337static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9338{
9339 struct bnx2x *bp = netdev_priv(dev);
9340
9341 if (wol->wolopts & ~WAKE_MAGIC)
9342 return -EINVAL;
9343
9344 if (wol->wolopts & WAKE_MAGIC) {
9345 if (bp->flags & NO_WOL_FLAG)
9346 return -EINVAL;
9347
9348 bp->wol = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009349 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009350 bp->wol = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009351
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009352 return 0;
9353}
9354
9355static u32 bnx2x_get_msglevel(struct net_device *dev)
9356{
9357 struct bnx2x *bp = netdev_priv(dev);
9358
Joe Perches7995c642010-02-17 15:01:52 +00009359 return bp->msg_enable;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009360}
9361
9362static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9363{
9364 struct bnx2x *bp = netdev_priv(dev);
9365
9366 if (capable(CAP_NET_ADMIN))
Joe Perches7995c642010-02-17 15:01:52 +00009367 bp->msg_enable = level;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009368}
9369
9370static int bnx2x_nway_reset(struct net_device *dev)
9371{
9372 struct bnx2x *bp = netdev_priv(dev);
9373
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009374 if (!bp->port.pmf)
9375 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009376
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009377 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009378 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009379 bnx2x_link_set(bp);
9380 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009381
9382 return 0;
9383}
9384
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009385static u32 bnx2x_get_link(struct net_device *dev)
Naohiro Ooiwa01e53292009-06-30 12:44:19 -07009386{
9387 struct bnx2x *bp = netdev_priv(dev);
9388
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07009389 if (bp->flags & MF_FUNC_DIS)
9390 return 0;
9391
Naohiro Ooiwa01e53292009-06-30 12:44:19 -07009392 return bp->link_vars.link_up;
9393}
9394
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009395static int bnx2x_get_eeprom_len(struct net_device *dev)
9396{
9397 struct bnx2x *bp = netdev_priv(dev);
9398
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009399 return bp->common.flash_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009400}
9401
9402static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9403{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009404 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009405 int count, i;
9406 u32 val = 0;
9407
9408 /* adjust timeout for emulation/FPGA */
9409 count = NVRAM_TIMEOUT_COUNT;
9410 if (CHIP_REV_IS_SLOW(bp))
9411 count *= 100;
9412
9413 /* request access to nvram interface */
9414 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9415 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9416
9417 for (i = 0; i < count*10; i++) {
9418 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9419 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9420 break;
9421
9422 udelay(5);
9423 }
9424
9425 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009426 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009427 return -EBUSY;
9428 }
9429
9430 return 0;
9431}
9432
9433static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9434{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009435 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009436 int count, i;
9437 u32 val = 0;
9438
9439 /* adjust timeout for emulation/FPGA */
9440 count = NVRAM_TIMEOUT_COUNT;
9441 if (CHIP_REV_IS_SLOW(bp))
9442 count *= 100;
9443
9444 /* relinquish nvram interface */
9445 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9446 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9447
9448 for (i = 0; i < count*10; i++) {
9449 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9450 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9451 break;
9452
9453 udelay(5);
9454 }
9455
9456 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009457 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009458 return -EBUSY;
9459 }
9460
9461 return 0;
9462}
9463
9464static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9465{
9466 u32 val;
9467
9468 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9469
9470 /* enable both bits, even on read */
9471 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9472 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9473 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9474}
9475
9476static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9477{
9478 u32 val;
9479
9480 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9481
9482 /* disable both bits, even after read */
9483 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9484 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9485 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9486}
9487
/* Read one 32-bit word of NVRAM at @offset through the MCP command
 * interface.  @cmd_flags may carry MCPR_NVM_COMMAND_FIRST/LAST to frame
 * a multi-dword sequence.  On success the word is stored big-endian in
 * @ret_val (ethtool presents the dump as a byte array).
 * Returns 0 on success, -EBUSY if the command does not complete within
 * the polling budget.  The register write order below (clear DONE,
 * program address, issue command) is the required command sequence.
 */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
9532
9533static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9534 int buf_size)
9535{
9536 int rc;
9537 u32 cmd_flags;
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00009538 __be32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009539
9540 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009541 DP(BNX2X_MSG_NVM,
Eliezer Tamirc14423f2008-02-28 11:49:42 -08009542 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009543 offset, buf_size);
9544 return -EINVAL;
9545 }
9546
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009547 if (offset + buf_size > bp->common.flash_size) {
9548 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009549 " buf_size (0x%x) > flash_size (0x%x)\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009550 offset, buf_size, bp->common.flash_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009551 return -EINVAL;
9552 }
9553
9554 /* request access to nvram interface */
9555 rc = bnx2x_acquire_nvram_lock(bp);
9556 if (rc)
9557 return rc;
9558
9559 /* enable access to nvram interface */
9560 bnx2x_enable_nvram_access(bp);
9561
9562 /* read the first word(s) */
9563 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9564 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9565 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9566 memcpy(ret_buf, &val, 4);
9567
9568 /* advance to the next dword */
9569 offset += sizeof(u32);
9570 ret_buf += sizeof(u32);
9571 buf_size -= sizeof(u32);
9572 cmd_flags = 0;
9573 }
9574
9575 if (rc == 0) {
9576 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9577 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9578 memcpy(ret_buf, &val, 4);
9579 }
9580
9581 /* disable access to nvram interface */
9582 bnx2x_disable_nvram_access(bp);
9583 bnx2x_release_nvram_lock(bp);
9584
9585 return rc;
9586}
9587
9588static int bnx2x_get_eeprom(struct net_device *dev,
9589 struct ethtool_eeprom *eeprom, u8 *eebuf)
9590{
9591 struct bnx2x *bp = netdev_priv(dev);
9592 int rc;
9593
Eilon Greenstein2add3ac2009-01-14 06:44:07 +00009594 if (!netif_running(dev))
9595 return -EAGAIN;
9596
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009597 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009598 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9599 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9600 eeprom->len, eeprom->len);
9601
9602 /* parameters already validated in ethtool_get_eeprom */
9603
9604 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9605
9606 return rc;
9607}
9608
/* Write one 32-bit word @val to NVRAM at @offset through the MCP
 * command interface.  @cmd_flags may carry MCPR_NVM_COMMAND_FIRST/LAST
 * to frame a multi-dword sequence.  Returns 0 on success, -EBUSY if
 * the command does not complete within the polling budget.  The write
 * order (clear DONE, data, address, command) is the required sequence.
 */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
9648
/* byte position (in bits) of @offset within its aligned dword */
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

/* Write a single byte (*data_buf) to NVRAM at @offset by reading the
 * enclosing aligned dword, patching one byte, and writing it back.
 * NOTE(review): the mask/merge below is applied to the raw big-endian
 * word returned by bnx2x_nvram_read_dword() before converting back to
 * CPU order — the BYTE_OFFSET-to-byte-lane mapping relies on that
 * representation; confirm against the MCP byte ordering if changed.
 */
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* a one-dword transfer is both FIRST and LAST */
	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9696
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.  A
 * single-byte write (ethtool's magic-key handshake) is delegated to
 * bnx2x_nvram_write1(); everything else must be dword-aligned and
 * within the flash.  The FIRST/LAST command flags are re-asserted at
 * every NVRAM_PAGE_SIZE boundary so each flash page is framed as its
 * own command sequence — the order of the three checks below matters.
 */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* LAST on the final dword of the buffer or of a page,
		 * FIRST again at the start of each new page */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9757
/* ethtool set_eeprom callback.  Besides plain NVRAM writes, three magic
 * values drive the external-PHY firmware upgrade state machine:
 * 'PHYP' prepare, 'PHYR' re-init after upgrade, and an upgrade-complete
 * step.  The GPIO/reset/msleep ordering in those branches follows the
 * SFX7101 upgrade procedure and must not be reordered.
 */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed
		 * NOTE(review): 0x53985943 does not actually spell 'PHYC'
		 * (that would be 0x50485943) and it falls outside the
		 * 0x504859xx PMF-only range checked above, so this branch
		 * is reachable by non-PMF functions — confirm intent. */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
				XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
9832
9833static int bnx2x_get_coalesce(struct net_device *dev,
9834 struct ethtool_coalesce *coal)
9835{
9836 struct bnx2x *bp = netdev_priv(dev);
9837
9838 memset(coal, 0, sizeof(struct ethtool_coalesce));
9839
9840 coal->rx_coalesce_usecs = bp->rx_ticks;
9841 coal->tx_coalesce_usecs = bp->tx_ticks;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009842
9843 return 0;
9844}
9845
Eilon Greensteinca003922009-08-12 22:53:28 -07009846#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009847static int bnx2x_set_coalesce(struct net_device *dev,
9848 struct ethtool_coalesce *coal)
9849{
9850 struct bnx2x *bp = netdev_priv(dev);
9851
9852 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -07009853 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9854 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009855
9856 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -07009857 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9858 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009859
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009860 if (netif_running(dev))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009861 bnx2x_update_coalesce(bp);
9862
9863 return 0;
9864}
9865
9866static void bnx2x_get_ringparam(struct net_device *dev,
9867 struct ethtool_ringparam *ering)
9868{
9869 struct bnx2x *bp = netdev_priv(dev);
9870
9871 ering->rx_max_pending = MAX_RX_AVAIL;
9872 ering->rx_mini_max_pending = 0;
9873 ering->rx_jumbo_max_pending = 0;
9874
9875 ering->rx_pending = bp->rx_ring_size;
9876 ering->rx_mini_pending = 0;
9877 ering->rx_jumbo_pending = 0;
9878
9879 ering->tx_max_pending = MAX_TX_AVAIL;
9880 ering->tx_pending = bp->tx_ring_size;
9881}
9882
9883static int bnx2x_set_ringparam(struct net_device *dev,
9884 struct ethtool_ringparam *ering)
9885{
9886 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009887 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009888
9889 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9890 (ering->tx_pending > MAX_TX_AVAIL) ||
9891 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9892 return -EINVAL;
9893
9894 bp->rx_ring_size = ering->rx_pending;
9895 bp->tx_ring_size = ering->tx_pending;
9896
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009897 if (netif_running(dev)) {
9898 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9899 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009900 }
9901
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009902 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009903}
9904
9905static void bnx2x_get_pauseparam(struct net_device *dev,
9906 struct ethtool_pauseparam *epause)
9907{
9908 struct bnx2x *bp = netdev_priv(dev);
9909
Eilon Greenstein356e2382009-02-12 08:38:32 +00009910 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9911 BNX2X_FLOW_CTRL_AUTO) &&
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009912 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9913
David S. Millerc0700f92008-12-16 23:53:20 -08009914 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9915 BNX2X_FLOW_CTRL_RX);
9916 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9917 BNX2X_FLOW_CTRL_TX);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009918
9919 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9920 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9921 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9922}
9923
/* ethtool set_pauseparam callback: translate the requested rx/tx pause
 * settings into req_flow_ctrl and re-negotiate the link.  No-op in
 * E1H multi-function mode, where the PMF owns link configuration.
 */
static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	/* start from AUTO and OR in the requested directions; if neither
	 * RX nor TX was requested the value is still AUTO, which is then
	 * folded to NONE below */
	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		/* true flow-control autoneg requires autoneg speed too */
		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
9967
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009968static int bnx2x_set_flags(struct net_device *dev, u32 data)
9969{
9970 struct bnx2x *bp = netdev_priv(dev);
9971 int changed = 0;
9972 int rc = 0;
9973
9974 /* TPA requires Rx CSUM offloading */
9975 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
Vladislav Zolotarovd43a7e62010-02-17 02:03:40 +00009976 if (!disable_tpa) {
9977 if (!(dev->features & NETIF_F_LRO)) {
9978 dev->features |= NETIF_F_LRO;
9979 bp->flags |= TPA_ENABLE_FLAG;
9980 changed = 1;
9981 }
9982 } else
9983 rc = -EINVAL;
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009984 } else if (dev->features & NETIF_F_LRO) {
9985 dev->features &= ~NETIF_F_LRO;
9986 bp->flags &= ~TPA_ENABLE_FLAG;
9987 changed = 1;
9988 }
9989
9990 if (changed && netif_running(dev)) {
9991 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9992 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9993 }
9994
9995 return rc;
9996}
9997
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009998static u32 bnx2x_get_rx_csum(struct net_device *dev)
9999{
10000 struct bnx2x *bp = netdev_priv(dev);
10001
10002 return bp->rx_csum;
10003}
10004
10005static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10006{
10007 struct bnx2x *bp = netdev_priv(dev);
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -070010008 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010009
10010 bp->rx_csum = data;
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -070010011
10012 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
10013 TPA'ed packets will be discarded due to wrong TCP CSUM */
10014 if (!data) {
10015 u32 flags = ethtool_op_get_flags(dev);
10016
10017 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10018 }
10019
10020 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010021}
10022
10023static int bnx2x_set_tso(struct net_device *dev, u32 data)
10024{
Eilon Greenstein755735e2008-06-23 20:35:13 -070010025 if (data) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010026 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735e2008-06-23 20:35:13 -070010027 dev->features |= NETIF_F_TSO6;
10028 } else {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010029 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735e2008-06-23 20:35:13 -070010030 dev->features &= ~NETIF_F_TSO6;
10031 }
10032
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010033 return 0;
10034}
10035
/* Names reported to ethtool for the self-tests.  The order must match
 * the buf[] result slots filled in by bnx2x_self_test() (buf[0] =
 * registers, buf[1] = memory, buf[2] = loopback, buf[3] = nvram,
 * buf[4] = interrupt, buf[5] = link).  The "(offline)" tests are only
 * run when ETH_TEST_FL_OFFLINE is requested. */
static const struct {
        char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
        { "idle check (online)" }
};
10047
/* Offline register self-test.
 *
 * For every entry in reg_tbl[], saves the register, writes a test
 * pattern (0x00000000 on the first pass, 0xffffffff on the second),
 * reads it back, restores the original value and compares the readback
 * against the pattern under the entry's writable-bits mask.
 *
 * Each entry holds:
 *   offset0 - register address for port 0
 *   offset1 - per-port stride added as port*offset1
 *   mask    - bits expected to be read/write
 *
 * Returns 0 when all registers pass, -ENODEV on the first mismatch or
 * when the interface is not running.
 */
static int bnx2x_test_registers(struct bnx2x *bp)
{
        int idx, i, rc = -ENODEV;
        u32 wr_val = 0;
        int port = BP_PORT(bp);
        static const struct {
                u32 offset0;
                u32 offset1;
                u32 mask;
        } reg_tbl[] = {
/* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
                { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
                { HC_REG_AGG_INT_0,                    4, 0x000003ff },
                { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
                { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
                { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
                { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
                { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
                { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
                { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
                { QM_REG_CONNNUM_0,                    4, 0x000fffff },
                { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
                { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
                { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
                { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
                { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
                { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
                { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
                { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
                { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
                { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
                { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
                { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
                { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
                { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
                { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
                { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
                { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
                { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
                { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
                { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
                { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
                { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
                { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

                { 0xffffffff, 0, 0x00000000 }   /* table terminator */
        };

        if (!netif_running(bp->dev))
                return rc;

        /* Repeat the test twice:
           First by writing 0x00000000, second by writing 0xffffffff */
        for (idx = 0; idx < 2; idx++) {

                switch (idx) {
                case 0:
                        wr_val = 0;
                        break;
                case 1:
                        wr_val = 0xffffffff;
                        break;
                }

                for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
                        u32 offset, mask, save_val, val;

                        /* per-port register address */
                        offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
                        mask = reg_tbl[i].mask;

                        save_val = REG_RD(bp, offset);

                        REG_WR(bp, offset, wr_val);
                        val = REG_RD(bp, offset);

                        /* Restore the original register's value */
                        REG_WR(bp, offset, save_val);

                        /* verify that value is as expected value */
                        if ((val & mask) != (wr_val & mask))
                                goto test_reg_exit;
                }
        }

        rc = 0;

test_reg_exit:
        return rc;
}
10140
/* Offline memory self-test.
 *
 * Reads through every word of the internal memories listed in
 * mem_tbl[] (the reads themselves exercise the memories), then checks
 * the parity-status registers in prty_tbl[].  Bits covered by the
 * chip-specific mask (e1_mask for 57710/E1, e1h_mask for E1H) are
 * ignored; any other set parity bit fails the test.
 *
 * Returns 0 on success, -ENODEV on a parity error or when the
 * interface is not running.
 */
static int bnx2x_test_memory(struct bnx2x *bp)
{
        int i, j, rc = -ENODEV;
        u32 val;
        static const struct {
                u32 offset;
                int size;       /* number of 32-bit words */
        } mem_tbl[] = {
                { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
                { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
                { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
                { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
                { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
                { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
                { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

                { 0xffffffff, 0 }       /* table terminator */
        };
        static const struct {
                char *name;
                u32 offset;
                u32 e1_mask;    /* parity bits to ignore on E1 */
                u32 e1h_mask;   /* parity bits to ignore on E1H */
        } prty_tbl[] = {
                { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
                { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
                { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
                { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
                { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
                { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },

                { NULL, 0xffffffff, 0, 0 }      /* table terminator */
        };

        if (!netif_running(bp->dev))
                return rc;

        /* Go through all the memories */
        for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
                for (j = 0; j < mem_tbl[i].size; j++)
                        REG_RD(bp, mem_tbl[i].offset + j*4);

        /* Check the parity status */
        for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
                val = REG_RD(bp, prty_tbl[i].offset);
                if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
                    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
                        DP(NETIF_MSG_HW,
                           "%s is 0x%x\n", prty_tbl[i].name, val);
                        goto test_mem_exit;
                }
        }

        rc = 0;

test_mem_exit:
        return rc;
}
10199
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010200static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10201{
10202 int cnt = 1000;
10203
10204 if (link_up)
10205 while (bnx2x_link_test(bp) && cnt--)
10206 msleep(10);
10207}
10208
/* Run a single loopback iteration in the given mode (BNX2X_PHY_LOOPBACK
 * or BNX2X_MAC_LOOPBACK).
 *
 * Builds one test frame (our MAC as source, zero destination, a 0x77
 * pad in the rest of the header and an incrementing byte payload),
 * posts it on queue 0's TX ring by hand, rings the doorbell, and after
 * a short delay verifies via the status-block indices that exactly one
 * packet was transmitted and one received.  The received completion and
 * payload are then checked byte-for-byte against the sent pattern.
 *
 * Returns 0 when the frame came back intact, -EINVAL for a bad/
 * unsupported mode, -ENOMEM on skb allocation failure, -ENODEV on any
 * TX/RX mismatch.  Always restores loopback_mode to LOOPBACK_NONE.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb;
        unsigned char *packet;
        /* both directions use queue 0 */
        struct bnx2x_fastpath *fp_rx = &bp->fp[0];
        struct bnx2x_fastpath *fp_tx = &bp->fp[0];
        u16 tx_start_idx, tx_idx;
        u16 rx_start_idx, rx_idx;
        u16 pkt_prod, bd_prod;
        struct sw_tx_bd *tx_buf;
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_parse_bd *pbd = NULL;
        dma_addr_t mapping;
        union eth_rx_cqe *cqe;
        u8 cqe_fp_flags;
        struct sw_rx_bd *rx_buf;
        u16 len;
        int rc = -ENODEV;

        /* check the loopback mode */
        switch (loopback_mode) {
        case BNX2X_PHY_LOOPBACK:
                /* PHY loopback relies on the mode set up at load time */
                if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
                        return -EINVAL;
                break;
        case BNX2X_MAC_LOOPBACK:
                bp->link_params.loopback_mode = LOOPBACK_BMAC;
                bnx2x_phy_init(&bp->link_params, &bp->link_vars);
                break;
        default:
                return -EINVAL;
        }

        /* prepare the loopback packet */
        pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
                     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (!skb) {
                rc = -ENOMEM;
                goto test_loopback_exit;
        }
        packet = skb_put(skb, pkt_size);
        /* src MAC = ours, dst MAC = zero, rest of header = 0x77 pad */
        memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
        memset(packet + ETH_ALEN, 0, ETH_ALEN);
        memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
        /* payload: low byte of the offset, verified on receive below */
        for (i = ETH_HLEN; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        /* send the loopback packet */
        num_pkts = 0;
        tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
        rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

        pkt_prod = fp_tx->tx_pkt_prod++;
        tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
        tx_buf->first_bd = fp_tx->tx_bd_prod;
        tx_buf->skb = skb;
        tx_buf->flags = 0;

        bd_prod = TX_BD(fp_tx->tx_bd_prod);
        tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
        /* NOTE(review): this mapping is not visibly unmapped in this
         * function - presumably torn down by the normal TX completion
         * path; verify against bnx2x_tx_int() */
        mapping = pci_map_single(bp->pdev, skb->data,
                                 skb_headlen(skb), PCI_DMA_TODEVICE);
        tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
        tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
        tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
        tx_start_bd->vlan = cpu_to_le16(pkt_prod);
        tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
        tx_start_bd->general_data = ((UNICAST_ADDRESS <<
                                ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

        /* turn on parsing and get a BD */
        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
        pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

        memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

        /* make sure the BDs are written before ringing the doorbell */
        wmb();

        fp_tx->tx_db.data.prod += 2;
        barrier();
        DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

        mmiowb();

        num_pkts++;
        fp_tx->tx_bd_prod += 2; /* start + pbd */

        /* give the chip time to loop the frame back */
        udelay(100);

        tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
        if (tx_idx != tx_start_idx + num_pkts)
                goto test_loopback_exit;

        rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
        if (rx_idx != rx_start_idx + num_pkts)
                goto test_loopback_exit;

        /* validate the completion: fast-path, no error flags */
        cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
        cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
        if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
                goto test_loopback_rx_exit;

        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
        if (len != pkt_size)
                goto test_loopback_rx_exit;

        /* compare the received payload to the pattern we sent */
        rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
        skb = rx_buf->skb;
        skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
        for (i = ETH_HLEN; i < pkt_size; i++)
                if (*(skb->data + i) != (unsigned char) (i & 0xff))
                        goto test_loopback_rx_exit;

        rc = 0;

test_loopback_rx_exit:

        /* consume the RX descriptor/completion we just inspected */
        fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
        fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
        fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
        fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

        /* Update producers */
        bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
                             fp_rx->rx_sge_prod);

test_loopback_exit:
        bp->link_params.loopback_mode = LOOPBACK_NONE;

        return rc;
}
10343
/* Offline loopback self-test: runs the PHY and MAC loopback iterations
 * with the netif path stopped and the PHY lock held.
 *
 * Returns a bitmask of BNX2X_PHY_LOOPBACK_FAILED / 
 * BNX2X_MAC_LOOPBACK_FAILED (0 means both passed), or
 * BNX2X_LOOPBACK_FAILED when the interface is not running.
 */
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
        int rc = 0, res;

        if (!netif_running(bp->dev))
                return BNX2X_LOOPBACK_FAILED;

        /* quiesce the datapath before injecting frames by hand */
        bnx2x_netif_stop(bp, 1);
        bnx2x_acquire_phy_lock(bp);

        res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
        if (res) {
                DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
                rc |= BNX2X_PHY_LOOPBACK_FAILED;
        }

        res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
        if (res) {
                DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
                rc |= BNX2X_MAC_LOOPBACK_FAILED;
        }

        bnx2x_release_phy_lock(bp);
        bnx2x_netif_start(bp);

        return rc;
}
10371
/* Standard CRC-32 residue: running the CRC over a region that includes
 * its own stored CRC yields this constant when the data is intact. */
#define CRC32_RESIDUAL			0xdebb20e3

/* Online NVRAM self-test.
 *
 * Verifies the NVRAM magic value (0x669955aa at offset 0), then reads
 * each region listed in nvram_tbl[] and checks that its CRC-32 residue
 * matches CRC32_RESIDUAL (each region carries its own CRC).
 *
 * Returns 0 on success, a bnx2x_nvram_read() error, or -ENODEV on a
 * bad magic or CRC mismatch.
 */
static int bnx2x_test_nvram(struct bnx2x *bp)
{
        static const struct {
                int offset;
                int size;
        } nvram_tbl[] = {
                {     0,  0x14 }, /* bootstrap */
                {  0x14,  0xec }, /* dir */
                { 0x100, 0x350 }, /* manuf_info */
                { 0x450,  0xf0 }, /* feature_info */
                { 0x640,  0x64 }, /* upgrade_key_info */
                { 0x6a4,  0x64 },
                { 0x708,  0x70 }, /* manuf_key_info */
                { 0x778,  0x70 },
                {     0,     0 }  /* table terminator (size == 0) */
        };
        /* sized for the largest region (manuf_info, 0x350 bytes) */
        __be32 buf[0x350 / 4];
        u8 *data = (u8 *)buf;
        int i, rc;
        u32 magic, crc;

        rc = bnx2x_nvram_read(bp, 0, data, 4);
        if (rc) {
                DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
                goto test_nvram_exit;
        }

        magic = be32_to_cpu(buf[0]);
        if (magic != 0x669955aa) {
                DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
                rc = -ENODEV;
                goto test_nvram_exit;
        }

        for (i = 0; nvram_tbl[i].size; i++) {

                rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
                                      nvram_tbl[i].size);
                if (rc) {
                        DP(NETIF_MSG_PROBE,
                           "nvram_tbl[%d] read data (rc %d)\n", i, rc);
                        goto test_nvram_exit;
                }

                crc = ether_crc_le(nvram_tbl[i].size, data);
                if (crc != CRC32_RESIDUAL) {
                        DP(NETIF_MSG_PROBE,
                           "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
                        rc = -ENODEV;
                        goto test_nvram_exit;
                }
        }

test_nvram_exit:
        return rc;
}
10430
/* Online interrupt self-test.
 *
 * Posts a zero-length SET_MAC ramrod on the slowpath and waits for its
 * completion, which arrives via interrupt - so a completed ramrod
 * proves the interrupt path works.  set_mac_pending is incremented
 * before posting and cleared by the completion handler; smp_wmb/smp_rmb
 * pair the counter update with the ramrod post and the polling reads.
 *
 * Returns 0 on completion, -ENODEV when the ramrod cannot be posted or
 * does not complete within ~100ms, or when the interface is down.
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
        struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
        int i, rc;

        if (!netif_running(bp->dev))
                return -ENODEV;

        /* zero-length command: nothing is actually configured */
        config->hdr.length = 0;
        if (CHIP_IS_E1(bp))
                /* use last unicast entries */
                config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
        else
                config->hdr.offset = BP_FUNC(bp);
        config->hdr.client_id = bp->fp->cl_id;
        config->hdr.reserved1 = 0;

        bp->set_mac_pending++;
        smp_wmb();
        rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
                           U64_HI(bnx2x_sp_mapping(bp, mac_config)),
                           U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
        if (rc == 0) {
                /* poll up to 10 * 10ms for the completion interrupt */
                for (i = 0; i < 10; i++) {
                        if (!bp->set_mac_pending)
                                break;
                        smp_rmb();
                        msleep_interruptible(10);
                }
                if (i == 10)
                        rc = -ENODEV;
        }

        return rc;
}
10466
/* ethtool ->self_test callback.
 *
 * Runs the offline tests (registers, memory, loopback) when
 * ETH_TEST_FL_OFFLINE is requested - these reload the NIC in diagnostic
 * mode and are skipped in E1H multi-function mode - then the online
 * tests (nvram, interrupt, and link for the PMF).  A failing test sets
 * its buf[] slot non-zero and raises ETH_TEST_FL_FAILED; slot order
 * matches bnx2x_tests_str_arr.
 */
static void bnx2x_self_test(struct net_device *dev,
                            struct ethtool_test *etest, u64 *buf)
{
        struct bnx2x *bp = netdev_priv(dev);

        memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

        if (!netif_running(dev))
                return;

        /* offline tests are not supported in MF mode */
        if (IS_E1HMF(bp))
                etest->flags &= ~ETH_TEST_FL_OFFLINE;

        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int port = BP_PORT(bp);
                u32 val;
                u8 link_up;

                /* save current value of input enable for TX port IF */
                val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
                /* disable input for TX port IF */
                REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

                link_up = (bnx2x_link_test(bp) == 0);
                /* reload the NIC in diagnostics mode for the tests */
                bnx2x_nic_unload(bp, UNLOAD_NORMAL);
                bnx2x_nic_load(bp, LOAD_DIAG);
                /* wait until link state is restored */
                bnx2x_wait_for_link(bp, link_up);

                if (bnx2x_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2x_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                /* buf[2] carries the loopback failure bitmask directly */
                buf[2] = bnx2x_test_loopback(bp, link_up);
                if (buf[2] != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                bnx2x_nic_unload(bp, UNLOAD_NORMAL);

                /* restore input for TX port IF */
                REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

                bnx2x_nic_load(bp, LOAD_NORMAL);
                /* wait until link state is restored */
                bnx2x_wait_for_link(bp, link_up);
        }
        if (bnx2x_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2x_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        /* link test only for the port management function */
        if (bp->port.pmf)
                if (bnx2x_link_test(bp) != 0) {
                        buf[5] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }

#ifdef BNX2X_EXTRA_DEBUG
        bnx2x_panic_dump(bp);
#endif
}
10536
/* Per-queue ethtool statistics descriptors.  'offset' locates the
 * counter in the per-queue stats area (via Q_STATS_OFFSET32), 'size'
 * is the counter width in bytes (8 = 64-bit hi/lo pair, 4 = 32-bit).
 * Each name contains a "%d" placeholder that bnx2x_get_strings() fills
 * with the queue index. */
static const struct {
        long offset;
        int size;
        u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
        { Q_STATS_OFFSET32(error_bytes_received_hi),
                                        8, "[%d]: rx_error_bytes" },
        { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
                                        8, "[%d]: rx_ucast_packets" },
        { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
                                        8, "[%d]: rx_mcast_packets" },
        { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
                                        8, "[%d]: rx_bcast_packets" },
        { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
        { Q_STATS_OFFSET32(rx_err_discard_pkt),
                                        4, "[%d]: rx_phy_ip_err_discards"},
        { Q_STATS_OFFSET32(rx_skb_alloc_failed),
                                        4, "[%d]: rx_skb_alloc_discard" },
        { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
        { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
                                        8, "[%d]: tx_packets" }
};
10562
/* Global ethtool statistics descriptors.  'offset' locates the counter
 * (via STATS_OFFSET32), 'size' is the width in bytes (8 = 64-bit hi/lo
 * pair, 4 = 32-bit), and 'flags' marks a stat as per-port, per-function
 * or both - used by the IS_PORT_STAT/IS_FUNC_STAT filters when building
 * the list exposed to userspace. */
static const struct {
        long offset;
        int size;
        u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
        u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
                                8, STATS_FLAGS_BOTH, "rx_bytes" },
        { STATS_OFFSET32(error_bytes_received_hi),
                                8, STATS_FLAGS_BOTH, "rx_error_bytes" },
        { STATS_OFFSET32(total_unicast_packets_received_hi),
                                8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
        { STATS_OFFSET32(total_multicast_packets_received_hi),
                                8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
        { STATS_OFFSET32(total_broadcast_packets_received_hi),
                                8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
        { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
                                8, STATS_FLAGS_PORT, "rx_crc_errors" },
        { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
                                8, STATS_FLAGS_PORT, "rx_align_errors" },
        { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
                                8, STATS_FLAGS_PORT, "rx_undersize_packets" },
        { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
                                8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
                                8, STATS_FLAGS_PORT, "rx_fragments" },
        { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
                                8, STATS_FLAGS_PORT, "rx_jabbers" },
        { STATS_OFFSET32(no_buff_discard_hi),
                                8, STATS_FLAGS_BOTH, "rx_discards" },
        { STATS_OFFSET32(mac_filter_discard),
                                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
        { STATS_OFFSET32(xxoverflow_discard),
                                4, STATS_FLAGS_PORT, "rx_fw_discards" },
        { STATS_OFFSET32(brb_drop_hi),
                                8, STATS_FLAGS_PORT, "rx_brb_discard" },
        { STATS_OFFSET32(brb_truncate_hi),
                                8, STATS_FLAGS_PORT, "rx_brb_truncate" },
        { STATS_OFFSET32(pause_frames_received_hi),
                                8, STATS_FLAGS_PORT, "rx_pause_frames" },
        { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
                                8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
        { STATS_OFFSET32(nig_timer_max),
                        4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
                                4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
        { STATS_OFFSET32(rx_skb_alloc_failed),
                                4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
        { STATS_OFFSET32(hw_csum_err),
                                4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

        { STATS_OFFSET32(total_bytes_transmitted_hi),
                                8, STATS_FLAGS_BOTH, "tx_bytes" },
        { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
                                8, STATS_FLAGS_PORT, "tx_error_bytes" },
        { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
                                8, STATS_FLAGS_BOTH, "tx_packets" },
        { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
                                8, STATS_FLAGS_PORT, "tx_mac_errors" },
        { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
                                8, STATS_FLAGS_PORT, "tx_carrier_errors" },
        { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
                                8, STATS_FLAGS_PORT, "tx_single_collisions" },
        { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
                                8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
                                8, STATS_FLAGS_PORT, "tx_deferred" },
        { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
                                8, STATS_FLAGS_PORT, "tx_excess_collisions" },
        { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
                                8, STATS_FLAGS_PORT, "tx_late_collisions" },
        { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
                                8, STATS_FLAGS_PORT, "tx_total_collisions" },
        { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
                                8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
        { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
                        8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
        { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
                        8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
        { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
                        8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
        { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
                        8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
        { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
                        8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
                        8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
        { STATS_OFFSET32(pause_frames_sent_hi),
                                8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
10656
/* true when only the PORT flag is set for stat i */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
/* true when the FUNC flag is set for stat i (PORT may also be set) */
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
/* E1H multi-function mode with the BNX2X_MSG_STATS debug bit clear -
 * in this mode only per-function statistics are exposed */
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
Yitchak Gertner66e855f2008-08-13 15:49:05 -070010662
Ben Hutchings15f0a392009-10-01 11:58:24 +000010663static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10664{
10665 struct bnx2x *bp = netdev_priv(dev);
10666 int i, num_stats;
10667
10668 switch(stringset) {
10669 case ETH_SS_STATS:
10670 if (is_multi(bp)) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000010671 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
Ben Hutchings15f0a392009-10-01 11:58:24 +000010672 if (!IS_E1HMF_MODE_STAT(bp))
10673 num_stats += BNX2X_NUM_STATS;
10674 } else {
10675 if (IS_E1HMF_MODE_STAT(bp)) {
10676 num_stats = 0;
10677 for (i = 0; i < BNX2X_NUM_STATS; i++)
10678 if (IS_FUNC_STAT(i))
10679 num_stats++;
10680 } else
10681 num_stats = BNX2X_NUM_STATS;
10682 }
10683 return num_stats;
10684
10685 case ETH_SS_TEST:
10686 return BNX2X_NUM_TESTS;
10687
10688 default:
10689 return -EINVAL;
10690 }
10691}
10692
/* ethtool get_strings() callback - fills @buf with the names of the
 * statistics counters (ETH_SS_STATS) or the self tests (ETH_SS_TEST).
 * The layout must match bnx2x_get_sset_count() and
 * bnx2x_get_ethtool_stats() exactly.
 */
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			/* per-queue counters first; the name patterns in
			 * bnx2x_q_stats_arr embed the queue index via sprintf
			 */
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			/* port statistics are hidden in E1H MF mode */
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				/* skip port-only stats in E1H MF mode */
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}
10729
/* ethtool get_ethtool_stats() callback - copies the counter values into
 * @buf in exactly the order described by bnx2x_get_strings().
 *
 * The stats structures are walked as arrays of 32-bit words using the
 * per-entry offset from the stats arrays.  An entry size of 0 marks a
 * counter that is not maintained and is reported as 0; size 4 is a
 * single 32-bit word; otherwise the counter is a {hi, lo} pair of words
 * folded into a u64 with HILO_U64.
 */
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		/* per-queue statistics first, in queue order */
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		/* port statistics are hidden in E1H MF mode */
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		/* i walks the stats array, j walks the output buffer;
		 * they diverge when port stats are filtered out
		 */
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
10801
10802static int bnx2x_phys_id(struct net_device *dev, u32 data)
10803{
10804 struct bnx2x *bp = netdev_priv(dev);
10805 int i;
10806
Eilon Greenstein34f80b02008-06-23 20:33:01 -070010807 if (!netif_running(dev))
10808 return 0;
10809
10810 if (!bp->port.pmf)
10811 return 0;
10812
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010813 if (data == 0)
10814 data = 2;
10815
10816 for (i = 0; i < (data * 2); i++) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010817 if ((i % 2) == 0)
Yaniv Rosner7846e472009-11-05 19:18:07 +020010818 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10819 SPEED_1000);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010820 else
Yaniv Rosner7846e472009-11-05 19:18:07 +020010821 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010822
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010823 msleep_interruptible(500);
10824 if (signal_pending(current))
10825 break;
10826 }
10827
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070010828 if (bp->link_vars.link_up)
Yaniv Rosner7846e472009-11-05 19:18:07 +020010829 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10830 bp->link_vars.line_speed);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010831
10832 return 0;
10833}
10834
/* ethtool entry points; all bnx2x_* handlers are defined above in this
 * file, the ethtool_op_* ones are generic helpers from the ethtool core.
 */
static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};
10872
10873/* end of ethtool_ops */
10874
10875/****************************************************************************
10876* General service functions
10877****************************************************************************/
10878
/* Program the PCI PM control/status register to move the device between
 * D0 and D3hot.  Returns 0 on success, -EINVAL for any other requested
 * power state.
 */
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		/* clear the state field (-> D0) and ack any pending PME */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;	/* PM-state field encoding for D3hot */

		/* arm PME so WoL can wake the system */
		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
10916
Eilon Greenstein237907c2009-01-14 06:42:44 +000010917static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10918{
10919 u16 rx_cons_sb;
10920
10921 /* Tell compiler that status block fields can change */
10922 barrier();
10923 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10924 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10925 rx_cons_sb++;
10926 return (fp->rx_comp_cons != rx_cons_sb);
10927}
10928
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010929/*
10930 * net_device service functions
10931 */
10932
/* NAPI poll handler for one fastpath: drains TX completions, processes
 * up to @budget RX packets and, only when both rings are verifiably
 * empty, completes NAPI and re-enables the IGU interrupt.  The double
 * has-work check around rmb() avoids a lost-interrupt race with the
 * status-block DMA (see the detailed comment below).
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		/* on a panic stop polling immediately */
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		/* TX completions are not budgeted */
		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, thus we need
			 * to ensure that status block indices have been actually read
			 * (bnx2x_update_fpsb_idx) prior to this check
			 * (bnx2x_has_rx_work) so that we won't write the "newer"
			 * value of the status block to IGU (if there was a DMA right
			 * after bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed to right
			 * before bnx2x_ack_sb). In this case there will never be
			 * another interrupt until there is another update of the
			 * status block, while there is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
10991
Eilon Greenstein755735e2008-06-23 20:35:13 -070010992
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 *
 * Truncates the start BD pointed to by *@tx_bd to @hlen bytes, allocates
 * the next descriptor in the ring as a data BD covering the remaining
 * old_len - hlen bytes of the same DMA mapping, and advances *@tx_bd to
 * that new BD.  Returns the updated BD producer index.
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD: it now carries only the headers */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	/* data BD reuses the header BD's mapping, offset past the headers */
	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd so the caller continues from the data BD */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
11042
11043static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11044{
11045 if (fix > 0)
11046 csum = (u16) ~csum_fold(csum_sub(csum,
11047 csum_partial(t_header - fix, fix, 0)));
11048
11049 else if (fix < 0)
11050 csum = (u16) ~csum_fold(csum_add(csum,
11051 csum_partial(t_header, -fix, 0)));
11052
11053 return swab16(csum);
11054}
11055
11056static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11057{
11058 u32 rc;
11059
11060 if (skb->ip_summed != CHECKSUM_PARTIAL)
11061 rc = XMIT_PLAIN;
11062
11063 else {
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000011064 if (skb->protocol == htons(ETH_P_IPV6)) {
Eilon Greenstein755735e2008-06-23 20:35:13 -070011065 rc = XMIT_CSUM_V6;
11066 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11067 rc |= XMIT_CSUM_TCP;
11068
11069 } else {
11070 rc = XMIT_CSUM_V4;
11071 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11072 rc |= XMIT_CSUM_TCP;
11073 }
11074 }
11075
11076 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
Eilon Greensteind6a2f982009-11-09 06:09:22 +000011077 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011078
11079 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
Eilon Greensteind6a2f982009-11-09 06:09:22 +000011080 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011081
11082 return rc;
11083}
11084
Eilon Greenstein632da4d2009-01-14 06:44:10 +000011085#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
Eilon Greensteinf5372252009-02-12 08:38:30 +000011086/* check if packet requires linearization (packet is too fragmented)
11087 no need to check fragmentation if page size > 8K (there will be no
11088 violation to FW restrictions) */
/* Decide whether @skb must be linearized before transmission.
 *
 * Returns 1 when the skb is too fragmented for the FW: for LSO packets
 * a sliding window of wnd_size consecutive BDs is slid over the frag
 * list and the packet must be copied if any window carries less data
 * than one MSS; for non-LSO packets any skb exceeding the BD-count
 * limit is linearized unconditionally.  Returns 0 otherwise.
 */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows; each step adds the frag entering
			   the window and drops the one leaving it */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
					skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
Eilon Greenstein632da4d2009-01-14 06:44:10 +000011164#endif
Eilon Greenstein755735e2008-06-23 20:35:13 -070011165
11166/* called with netif_tx_lock
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011167 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
Eilon Greenstein755735e2008-06-23 20:35:13 -070011168 * netif_wake_queue()
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011169 */
Stephen Hemminger613573252009-08-31 19:50:58 +000011170static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011171{
11172 struct bnx2x *bp = netdev_priv(dev);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011173 struct bnx2x_fastpath *fp;
Eilon Greenstein555f6c72009-02-12 08:36:11 +000011174 struct netdev_queue *txq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011175 struct sw_tx_bd *tx_buf;
Eilon Greensteinca003922009-08-12 22:53:28 -070011176 struct eth_tx_start_bd *tx_start_bd;
11177 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011178 struct eth_tx_parse_bd *pbd = NULL;
11179 u16 pkt_prod, bd_prod;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011180 int nbd, fp_index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011181 dma_addr_t mapping;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011182 u32 xmit_type = bnx2x_xmit_type(bp, skb);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011183 int i;
11184 u8 hlen = 0;
Eilon Greensteinca003922009-08-12 22:53:28 -070011185 __le16 pkt_size = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011186
11187#ifdef BNX2X_STOP_ON_ERROR
11188 if (unlikely(bp->panic))
11189 return NETDEV_TX_BUSY;
11190#endif
11191
Eilon Greenstein555f6c72009-02-12 08:36:11 +000011192 fp_index = skb_get_queue_mapping(skb);
11193 txq = netdev_get_tx_queue(dev, fp_index);
11194
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011195 fp = &bp->fp[fp_index];
Eilon Greenstein755735e2008-06-23 20:35:13 -070011196
Yitchak Gertner231fd582008-08-25 15:27:06 -070011197 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011198 fp->eth_q_stats.driver_xoff++;
Eilon Greenstein555f6c72009-02-12 08:36:11 +000011199 netif_tx_stop_queue(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011200 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11201 return NETDEV_TX_BUSY;
11202 }
11203
Eilon Greenstein755735e2008-06-23 20:35:13 -070011204 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11205 " gso type %x xmit_type %x\n",
11206 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11207 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11208
Eilon Greenstein632da4d2009-01-14 06:44:10 +000011209#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
Eilon Greensteinf5372252009-02-12 08:38:30 +000011210 /* First, check if we need to linearize the skb (due to FW
11211 restrictions). No need to check fragmentation if page size > 8K
11212 (there will be no violation to FW restrictions) */
Eilon Greenstein755735e2008-06-23 20:35:13 -070011213 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11214 /* Statistics of linearization */
11215 bp->lin_cnt++;
11216 if (skb_linearize(skb) != 0) {
11217 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11218 "silently dropping this SKB\n");
11219 dev_kfree_skb_any(skb);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -070011220 return NETDEV_TX_OK;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011221 }
11222 }
Eilon Greenstein632da4d2009-01-14 06:44:10 +000011223#endif
Eilon Greenstein755735e2008-06-23 20:35:13 -070011224
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011225 /*
Eilon Greenstein755735e2008-06-23 20:35:13 -070011226 Please read carefully. First we use one BD which we mark as start,
Eilon Greensteinca003922009-08-12 22:53:28 -070011227 then we have a parsing info BD (used for TSO or xsum),
Eilon Greenstein755735e2008-06-23 20:35:13 -070011228 and only then we have the rest of the TSO BDs.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011229 (don't forget to mark the last one as last,
11230 and to unmap only AFTER you write to the BD ...)
Eilon Greenstein755735e2008-06-23 20:35:13 -070011231 And above all, all pdb sizes are in words - NOT DWORDS!
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011232 */
11233
11234 pkt_prod = fp->tx_pkt_prod++;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011235 bd_prod = TX_BD(fp->tx_bd_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011236
Eilon Greenstein755735e2008-06-23 20:35:13 -070011237 /* get a tx_buf and first BD */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011238 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
Eilon Greensteinca003922009-08-12 22:53:28 -070011239 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011240
Eilon Greensteinca003922009-08-12 22:53:28 -070011241 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11242 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11243 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
Eilon Greenstein3196a882008-08-13 15:58:49 -070011244 /* header nbd */
Eilon Greensteinca003922009-08-12 22:53:28 -070011245 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011246
Eilon Greenstein755735e2008-06-23 20:35:13 -070011247 /* remember the first BD of the packet */
11248 tx_buf->first_bd = fp->tx_bd_prod;
11249 tx_buf->skb = skb;
Eilon Greensteinca003922009-08-12 22:53:28 -070011250 tx_buf->flags = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011251
11252 DP(NETIF_MSG_TX_QUEUED,
11253 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Eilon Greensteinca003922009-08-12 22:53:28 -070011254 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011255
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080011256#ifdef BCM_VLAN
11257 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11258 (bp->flags & HW_VLAN_TX_FLAG)) {
Eilon Greensteinca003922009-08-12 22:53:28 -070011259 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11260 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011261 } else
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080011262#endif
Eilon Greensteinca003922009-08-12 22:53:28 -070011263 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011264
Eilon Greensteinca003922009-08-12 22:53:28 -070011265 /* turn on parsing and get a BD */
11266 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11267 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011268
Eilon Greensteinca003922009-08-12 22:53:28 -070011269 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
Eilon Greenstein755735e2008-06-23 20:35:13 -070011270
11271 if (xmit_type & XMIT_CSUM) {
Eilon Greensteinca003922009-08-12 22:53:28 -070011272 hlen = (skb_network_header(skb) - skb->data) / 2;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011273
11274 /* for now NS flag is not used in Linux */
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000011275 pbd->global_data =
11276 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11277 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
Eilon Greenstein755735e2008-06-23 20:35:13 -070011278
11279 pbd->ip_hlen = (skb_transport_header(skb) -
11280 skb_network_header(skb)) / 2;
11281
11282 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11283
11284 pbd->total_hlen = cpu_to_le16(hlen);
Eilon Greensteinca003922009-08-12 22:53:28 -070011285 hlen = hlen*2;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011286
Eilon Greensteinca003922009-08-12 22:53:28 -070011287 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011288
11289 if (xmit_type & XMIT_CSUM_V4)
Eilon Greensteinca003922009-08-12 22:53:28 -070011290 tx_start_bd->bd_flags.as_bitfield |=
Eilon Greenstein755735e2008-06-23 20:35:13 -070011291 ETH_TX_BD_FLAGS_IP_CSUM;
11292 else
Eilon Greensteinca003922009-08-12 22:53:28 -070011293 tx_start_bd->bd_flags.as_bitfield |=
11294 ETH_TX_BD_FLAGS_IPV6;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011295
11296 if (xmit_type & XMIT_CSUM_TCP) {
11297 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11298
11299 } else {
11300 s8 fix = SKB_CS_OFF(skb); /* signed! */
11301
Eilon Greensteinca003922009-08-12 22:53:28 -070011302 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011303
11304 DP(NETIF_MSG_TX_QUEUED,
Eilon Greensteinca003922009-08-12 22:53:28 -070011305 "hlen %d fix %d csum before fix %x\n",
11306 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
Eilon Greenstein755735e2008-06-23 20:35:13 -070011307
11308 /* HW bug: fixup the CSUM */
11309 pbd->tcp_pseudo_csum =
11310 bnx2x_csum_fix(skb_transport_header(skb),
11311 SKB_CS(skb), fix);
11312
11313 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11314 pbd->tcp_pseudo_csum);
11315 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011316 }
11317
11318 mapping = pci_map_single(bp->pdev, skb->data,
Eilon Greenstein755735e2008-06-23 20:35:13 -070011319 skb_headlen(skb), PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011320
Eilon Greensteinca003922009-08-12 22:53:28 -070011321 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11322 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11323 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11324 tx_start_bd->nbd = cpu_to_le16(nbd);
11325 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11326 pkt_size = tx_start_bd->nbytes;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011327
11328 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
Eilon Greenstein755735e2008-06-23 20:35:13 -070011329 " nbytes %d flags %x vlan %x\n",
Eilon Greensteinca003922009-08-12 22:53:28 -070011330 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11331 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11332 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011333
Eilon Greenstein755735e2008-06-23 20:35:13 -070011334 if (xmit_type & XMIT_GSO) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011335
11336 DP(NETIF_MSG_TX_QUEUED,
11337 "TSO packet len %d hlen %d total len %d tso size %d\n",
11338 skb->len, hlen, skb_headlen(skb),
11339 skb_shinfo(skb)->gso_size);
11340
Eilon Greensteinca003922009-08-12 22:53:28 -070011341 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011342
Eilon Greenstein755735e2008-06-23 20:35:13 -070011343 if (unlikely(skb_headlen(skb) > hlen))
Eilon Greensteinca003922009-08-12 22:53:28 -070011344 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11345 hlen, bd_prod, ++nbd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011346
11347 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11348 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011349 pbd->tcp_flags = pbd_tcp_flags(skb);
11350
11351 if (xmit_type & XMIT_GSO_V4) {
11352 pbd->ip_id = swab16(ip_hdr(skb)->id);
11353 pbd->tcp_pseudo_csum =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011354 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11355 ip_hdr(skb)->daddr,
11356 0, IPPROTO_TCP, 0));
Eilon Greenstein755735e2008-06-23 20:35:13 -070011357
11358 } else
11359 pbd->tcp_pseudo_csum =
11360 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11361 &ipv6_hdr(skb)->daddr,
11362 0, IPPROTO_TCP, 0));
11363
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011364 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11365 }
Eilon Greensteinca003922009-08-12 22:53:28 -070011366 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011367
Eilon Greenstein755735e2008-06-23 20:35:13 -070011368 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11369 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011370
Eilon Greenstein755735e2008-06-23 20:35:13 -070011371 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Eilon Greensteinca003922009-08-12 22:53:28 -070011372 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11373 if (total_pkt_bd == NULL)
11374 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011375
Eilon Greenstein755735e2008-06-23 20:35:13 -070011376 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11377 frag->size, PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011378
Eilon Greensteinca003922009-08-12 22:53:28 -070011379 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11380 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11381 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11382 le16_add_cpu(&pkt_size, frag->size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011383
Eilon Greenstein755735e2008-06-23 20:35:13 -070011384 DP(NETIF_MSG_TX_QUEUED,
Eilon Greensteinca003922009-08-12 22:53:28 -070011385 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11386 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11387 le16_to_cpu(tx_data_bd->nbytes));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011388 }
11389
Eilon Greensteinca003922009-08-12 22:53:28 -070011390 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011391
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011392 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11393
Eilon Greenstein755735e2008-06-23 20:35:13 -070011394 /* now send a tx doorbell, counting the next BD
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011395 * if the packet contains or ends with it
11396 */
11397 if (TX_BD_POFF(bd_prod) < nbd)
11398 nbd++;
11399
Eilon Greensteinca003922009-08-12 22:53:28 -070011400 if (total_pkt_bd != NULL)
11401 total_pkt_bd->total_pkt_bytes = pkt_size;
11402
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011403 if (pbd)
11404 DP(NETIF_MSG_TX_QUEUED,
11405 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11406 " tcp_flags %x xsum %x seq %u hlen %u\n",
11407 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11408 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
Eilon Greenstein755735e2008-06-23 20:35:13 -070011409 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011410
Eilon Greenstein755735e2008-06-23 20:35:13 -070011411 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011412
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -080011413 /*
11414 * Make sure that the BD data is updated before updating the producer
11415 * since FW might read the BD right after the producer is updated.
11416 * This is only applicable for weak-ordered memory model archs such
11417 * as IA-64. The following barrier is also mandatory since FW will
11418 * assumes packets must have BDs.
11419 */
11420 wmb();
11421
Eilon Greensteinca003922009-08-12 22:53:28 -070011422 fp->tx_db.data.prod += nbd;
11423 barrier();
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011424 DOORBELL(bp, fp->index, fp->tx_db.raw);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011425
11426 mmiowb();
11427
Eilon Greenstein755735e2008-06-23 20:35:13 -070011428 fp->tx_bd_prod += nbd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011429
11430 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
Eilon Greensteinca003922009-08-12 22:53:28 -070011431 netif_tx_stop_queue(txq);
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -080011432 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11433 if we put Tx into XOFF state. */
11434 smp_mb();
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011435 fp->eth_q_stats.driver_xoff++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011436 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
Eilon Greenstein555f6c72009-02-12 08:36:11 +000011437 netif_tx_wake_queue(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011438 }
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000011439 fp->tx_pkt++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011440
11441 return NETDEV_TX_OK;
11442}
11443
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011444/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011445static int bnx2x_open(struct net_device *dev)
11446{
11447 struct bnx2x *bp = netdev_priv(dev);
11448
Eilon Greenstein6eccabb2009-01-22 03:37:48 +000011449 netif_carrier_off(dev);
11450
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011451 bnx2x_set_power_state(bp, PCI_D0);
11452
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011453 return bnx2x_nic_load(bp, LOAD_OPEN);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011454}
11455
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011456/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011457static int bnx2x_close(struct net_device *dev)
11458{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011459 struct bnx2x *bp = netdev_priv(dev);
11460
11461 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011462 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11463 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11464 if (!CHIP_REV_IS_SLOW(bp))
11465 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011466
11467 return 0;
11468}
11469
Eilon Greensteinf5372252009-02-12 08:38:30 +000011470/* called with netif_tx_lock from dev_mcast.c */
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011471static void bnx2x_set_rx_mode(struct net_device *dev)
11472{
11473 struct bnx2x *bp = netdev_priv(dev);
11474 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11475 int port = BP_PORT(bp);
11476
11477 if (bp->state != BNX2X_STATE_OPEN) {
11478 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11479 return;
11480 }
11481
11482 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11483
11484 if (dev->flags & IFF_PROMISC)
11485 rx_mode = BNX2X_RX_MODE_PROMISC;
11486
11487 else if ((dev->flags & IFF_ALLMULTI) ||
Jiri Pirko4cd24ea2010-02-08 04:30:35 +000011488 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
11489 CHIP_IS_E1(bp)))
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011490 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11491
11492 else { /* some multicasts */
11493 if (CHIP_IS_E1(bp)) {
11494 int i, old, offset;
11495 struct dev_mc_list *mclist;
11496 struct mac_configuration_cmd *config =
11497 bnx2x_sp(bp, mcast_config);
11498
Jiri Pirko0ddf4772010-02-20 00:13:58 +000011499 i = 0;
11500 netdev_for_each_mc_addr(mclist, dev) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011501 config->config_table[i].
11502 cam_entry.msb_mac_addr =
11503 swab16(*(u16 *)&mclist->dmi_addr[0]);
11504 config->config_table[i].
11505 cam_entry.middle_mac_addr =
11506 swab16(*(u16 *)&mclist->dmi_addr[2]);
11507 config->config_table[i].
11508 cam_entry.lsb_mac_addr =
11509 swab16(*(u16 *)&mclist->dmi_addr[4]);
11510 config->config_table[i].cam_entry.flags =
11511 cpu_to_le16(port);
11512 config->config_table[i].
11513 target_table_entry.flags = 0;
Eilon Greensteinca003922009-08-12 22:53:28 -070011514 config->config_table[i].target_table_entry.
11515 clients_bit_vector =
11516 cpu_to_le32(1 << BP_L_ID(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011517 config->config_table[i].
11518 target_table_entry.vlan_id = 0;
11519
11520 DP(NETIF_MSG_IFUP,
11521 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11522 config->config_table[i].
11523 cam_entry.msb_mac_addr,
11524 config->config_table[i].
11525 cam_entry.middle_mac_addr,
11526 config->config_table[i].
11527 cam_entry.lsb_mac_addr);
Jiri Pirko0ddf4772010-02-20 00:13:58 +000011528 i++;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011529 }
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -080011530 old = config->hdr.length;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011531 if (old > i) {
11532 for (; i < old; i++) {
11533 if (CAM_IS_INVALID(config->
11534 config_table[i])) {
Eilon Greensteinaf246402009-01-14 06:43:59 +000011535 /* already invalidated */
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011536 break;
11537 }
11538 /* invalidate */
11539 CAM_INVALIDATE(config->
11540 config_table[i]);
11541 }
11542 }
11543
11544 if (CHIP_REV_IS_SLOW(bp))
11545 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11546 else
11547 offset = BNX2X_MAX_MULTICAST*(1 + port);
11548
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -080011549 config->hdr.length = i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011550 config->hdr.offset = offset;
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -080011551 config->hdr.client_id = bp->fp->cl_id;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011552 config->hdr.reserved1 = 0;
11553
Michael Chane665bfd2009-10-10 13:46:54 +000011554 bp->set_mac_pending++;
11555 smp_wmb();
11556
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011557 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11558 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11559 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11560 0);
11561 } else { /* E1H */
11562 /* Accept one or more multicasts */
11563 struct dev_mc_list *mclist;
11564 u32 mc_filter[MC_HASH_SIZE];
11565 u32 crc, bit, regidx;
11566 int i;
11567
11568 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11569
Jiri Pirko0ddf4772010-02-20 00:13:58 +000011570 netdev_for_each_mc_addr(mclist, dev) {
Johannes Berg7c510e42008-10-27 17:47:26 -070011571 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11572 mclist->dmi_addr);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011573
11574 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11575 bit = (crc >> 24) & 0xff;
11576 regidx = bit >> 5;
11577 bit &= 0x1f;
11578 mc_filter[regidx] |= (1 << bit);
11579 }
11580
11581 for (i = 0; i < MC_HASH_SIZE; i++)
11582 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11583 mc_filter[i]);
11584 }
11585 }
11586
11587 bp->rx_mode = rx_mode;
11588 bnx2x_set_storm_rx_mode(bp);
11589}
11590
11591/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011592static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11593{
11594 struct sockaddr *addr = p;
11595 struct bnx2x *bp = netdev_priv(dev);
11596
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011597 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011598 return -EINVAL;
11599
11600 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011601 if (netif_running(dev)) {
11602 if (CHIP_IS_E1(bp))
Michael Chane665bfd2009-10-10 13:46:54 +000011603 bnx2x_set_eth_mac_addr_e1(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011604 else
Michael Chane665bfd2009-10-10 13:46:54 +000011605 bnx2x_set_eth_mac_addr_e1h(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011606 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011607
11608 return 0;
11609}
11610
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011611/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011612static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11613 int devad, u16 addr)
11614{
11615 struct bnx2x *bp = netdev_priv(netdev);
11616 u16 value;
11617 int rc;
11618 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11619
11620 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11621 prtad, devad, addr);
11622
11623 if (prtad != bp->mdio.prtad) {
11624 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11625 prtad, bp->mdio.prtad);
11626 return -EINVAL;
11627 }
11628
11629 /* The HW expects different devad if CL22 is used */
11630 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11631
11632 bnx2x_acquire_phy_lock(bp);
11633 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11634 devad, addr, &value);
11635 bnx2x_release_phy_lock(bp);
11636 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11637
11638 if (!rc)
11639 rc = value;
11640 return rc;
11641}
11642
11643/* called with rtnl_lock */
11644static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11645 u16 addr, u16 value)
11646{
11647 struct bnx2x *bp = netdev_priv(netdev);
11648 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11649 int rc;
11650
11651 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11652 " value 0x%x\n", prtad, devad, addr, value);
11653
11654 if (prtad != bp->mdio.prtad) {
11655 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11656 prtad, bp->mdio.prtad);
11657 return -EINVAL;
11658 }
11659
11660 /* The HW expects different devad if CL22 is used */
11661 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11662
11663 bnx2x_acquire_phy_lock(bp);
11664 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11665 devad, addr, value);
11666 bnx2x_release_phy_lock(bp);
11667 return rc;
11668}
11669
11670/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011671static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11672{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011673 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011674 struct mii_ioctl_data *mdio = if_mii(ifr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011675
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011676 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11677 mdio->phy_id, mdio->reg_num, mdio->val_in);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011678
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011679 if (!netif_running(dev))
11680 return -EAGAIN;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011681
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011682 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011683}
11684
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011685/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011686static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11687{
11688 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011689 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011690
11691 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11692 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11693 return -EINVAL;
11694
11695 /* This does not race with packet allocation
Eliezer Tamirc14423f2008-02-28 11:49:42 -080011696 * because the actual alloc size is
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011697 * only updated as part of load
11698 */
11699 dev->mtu = new_mtu;
11700
11701 if (netif_running(dev)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011702 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11703 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011704 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011705
11706 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011707}
11708
11709static void bnx2x_tx_timeout(struct net_device *dev)
11710{
11711 struct bnx2x *bp = netdev_priv(dev);
11712
11713#ifdef BNX2X_STOP_ON_ERROR
11714 if (!bp->panic)
11715 bnx2x_panic();
11716#endif
11717 /* This allows the netif to be shutdown gracefully before resetting */
11718 schedule_work(&bp->reset_task);
11719}
11720
11721#ifdef BCM_VLAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011722/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011723static void bnx2x_vlan_rx_register(struct net_device *dev,
11724 struct vlan_group *vlgrp)
11725{
11726 struct bnx2x *bp = netdev_priv(dev);
11727
11728 bp->vlgrp = vlgrp;
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080011729
11730 /* Set flags according to the required capabilities */
11731 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11732
11733 if (dev->features & NETIF_F_HW_VLAN_TX)
11734 bp->flags |= HW_VLAN_TX_FLAG;
11735
11736 if (dev->features & NETIF_F_HW_VLAN_RX)
11737 bp->flags |= HW_VLAN_RX_FLAG;
11738
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011739 if (netif_running(dev))
Eliezer Tamir49d66772008-02-28 11:53:13 -080011740 bnx2x_set_client_config(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011741}
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011742
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011743#endif
11744
Alexey Dobriyan257ddbd2010-01-27 10:17:41 +000011745#ifdef CONFIG_NET_POLL_CONTROLLER
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011746static void poll_bnx2x(struct net_device *dev)
11747{
11748 struct bnx2x *bp = netdev_priv(dev);
11749
11750 disable_irq(bp->pdev->irq);
11751 bnx2x_interrupt(bp->pdev->irq, dev);
11752 enable_irq(bp->pdev->irq);
11753}
11754#endif
11755
Stephen Hemmingerc64213c2008-11-21 17:36:04 -080011756static const struct net_device_ops bnx2x_netdev_ops = {
11757 .ndo_open = bnx2x_open,
11758 .ndo_stop = bnx2x_close,
11759 .ndo_start_xmit = bnx2x_start_xmit,
Eilon Greenstein356e2382009-02-12 08:38:32 +000011760 .ndo_set_multicast_list = bnx2x_set_rx_mode,
Stephen Hemmingerc64213c2008-11-21 17:36:04 -080011761 .ndo_set_mac_address = bnx2x_change_mac_addr,
11762 .ndo_validate_addr = eth_validate_addr,
11763 .ndo_do_ioctl = bnx2x_ioctl,
11764 .ndo_change_mtu = bnx2x_change_mtu,
11765 .ndo_tx_timeout = bnx2x_tx_timeout,
11766#ifdef BCM_VLAN
11767 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11768#endif
Alexey Dobriyan257ddbd2010-01-27 10:17:41 +000011769#ifdef CONFIG_NET_POLL_CONTROLLER
Stephen Hemmingerc64213c2008-11-21 17:36:04 -080011770 .ndo_poll_controller = poll_bnx2x,
11771#endif
11772};
11773
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011774static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11775 struct net_device *dev)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011776{
11777 struct bnx2x *bp;
11778 int rc;
11779
11780 SET_NETDEV_DEV(dev, &pdev->dev);
11781 bp = netdev_priv(dev);
11782
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011783 bp->dev = dev;
11784 bp->pdev = pdev;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011785 bp->flags = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011786 bp->func = PCI_FUNC(pdev->devfn);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011787
11788 rc = pci_enable_device(pdev);
11789 if (rc) {
Joe Perches7995c642010-02-17 15:01:52 +000011790 pr_err("Cannot enable PCI device, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011791 goto err_out;
11792 }
11793
11794 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Joe Perches7995c642010-02-17 15:01:52 +000011795 pr_err("Cannot find PCI device base address, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011796 rc = -ENODEV;
11797 goto err_out_disable;
11798 }
11799
11800 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
Joe Perches7995c642010-02-17 15:01:52 +000011801 pr_err("Cannot find second PCI device base address, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011802 rc = -ENODEV;
11803 goto err_out_disable;
11804 }
11805
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011806 if (atomic_read(&pdev->enable_cnt) == 1) {
11807 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11808 if (rc) {
Joe Perches7995c642010-02-17 15:01:52 +000011809 pr_err("Cannot obtain PCI resources, aborting\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011810 goto err_out_disable;
11811 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011812
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011813 pci_set_master(pdev);
11814 pci_save_state(pdev);
11815 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011816
11817 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11818 if (bp->pm_cap == 0) {
Joe Perches7995c642010-02-17 15:01:52 +000011819 pr_err("Cannot find power management capability, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011820 rc = -EIO;
11821 goto err_out_release;
11822 }
11823
11824 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11825 if (bp->pcie_cap == 0) {
Joe Perches7995c642010-02-17 15:01:52 +000011826 pr_err("Cannot find PCI Express capability, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011827 rc = -EIO;
11828 goto err_out_release;
11829 }
11830
Yang Hongyang6a355282009-04-06 19:01:13 -070011831 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011832 bp->flags |= USING_DAC_FLAG;
Yang Hongyang6a355282009-04-06 19:01:13 -070011833 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
Joe Perches7995c642010-02-17 15:01:52 +000011834 pr_err("pci_set_consistent_dma_mask failed, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011835 rc = -EIO;
11836 goto err_out_release;
11837 }
11838
Yang Hongyang284901a2009-04-06 19:01:15 -070011839 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
Joe Perches7995c642010-02-17 15:01:52 +000011840 pr_err("System does not support DMA, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011841 rc = -EIO;
11842 goto err_out_release;
11843 }
11844
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011845 dev->mem_start = pci_resource_start(pdev, 0);
11846 dev->base_addr = dev->mem_start;
11847 dev->mem_end = pci_resource_end(pdev, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011848
11849 dev->irq = pdev->irq;
11850
Arjan van de Ven275f1652008-10-20 21:42:39 -070011851 bp->regview = pci_ioremap_bar(pdev, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011852 if (!bp->regview) {
Joe Perches7995c642010-02-17 15:01:52 +000011853 pr_err("Cannot map register space, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011854 rc = -ENOMEM;
11855 goto err_out_release;
11856 }
11857
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011858 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11859 min_t(u64, BNX2X_DB_SIZE,
11860 pci_resource_len(pdev, 2)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011861 if (!bp->doorbells) {
Joe Perches7995c642010-02-17 15:01:52 +000011862 pr_err("Cannot map doorbell space, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011863 rc = -ENOMEM;
11864 goto err_out_unmap;
11865 }
11866
11867 bnx2x_set_power_state(bp, PCI_D0);
11868
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011869 /* clean indirect addresses */
11870 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11871 PCICFG_VENDOR_ID_OFFSET);
11872 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11873 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11874 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11875 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011876
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011877 dev->watchdog_timeo = TX_TIMEOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011878
Stephen Hemmingerc64213c2008-11-21 17:36:04 -080011879 dev->netdev_ops = &bnx2x_netdev_ops;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011880 dev->ethtool_ops = &bnx2x_ethtool_ops;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011881 dev->features |= NETIF_F_SG;
11882 dev->features |= NETIF_F_HW_CSUM;
11883 if (bp->flags & USING_DAC_FLAG)
11884 dev->features |= NETIF_F_HIGHDMA;
Eilon Greenstein5316bc02009-07-21 05:47:43 +000011885 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11886 dev->features |= NETIF_F_TSO6;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011887#ifdef BCM_VLAN
11888 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080011889 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
Eilon Greenstein5316bc02009-07-21 05:47:43 +000011890
11891 dev->vlan_features |= NETIF_F_SG;
11892 dev->vlan_features |= NETIF_F_HW_CSUM;
11893 if (bp->flags & USING_DAC_FLAG)
11894 dev->vlan_features |= NETIF_F_HIGHDMA;
11895 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11896 dev->vlan_features |= NETIF_F_TSO6;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011897#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011898
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011899 /* get_port_hwinfo() will set prtad and mmds properly */
11900 bp->mdio.prtad = MDIO_PRTAD_NONE;
11901 bp->mdio.mmds = 0;
11902 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11903 bp->mdio.dev = dev;
11904 bp->mdio.mdio_read = bnx2x_mdio_read;
11905 bp->mdio.mdio_write = bnx2x_mdio_write;
11906
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011907 return 0;
11908
11909err_out_unmap:
11910 if (bp->regview) {
11911 iounmap(bp->regview);
11912 bp->regview = NULL;
11913 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011914 if (bp->doorbells) {
11915 iounmap(bp->doorbells);
11916 bp->doorbells = NULL;
11917 }
11918
11919err_out_release:
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011920 if (atomic_read(&pdev->enable_cnt) == 1)
11921 pci_release_regions(pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011922
11923err_out_disable:
11924 pci_disable_device(pdev);
11925 pci_set_drvdata(pdev, NULL);
11926
11927err_out:
11928 return rc;
11929}
11930
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011931static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11932 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -080011933{
11934 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11935
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011936 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11937
11938 /* return value of 1=2.5GHz 2=5GHz */
11939 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -080011940}
11941
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011942static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11943{
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011944 const struct firmware *firmware = bp->firmware;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011945 struct bnx2x_fw_file_hdr *fw_hdr;
11946 struct bnx2x_fw_file_section *sections;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011947 u32 offset, len, num_ops;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011948 u16 *ops_offsets;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011949 int i;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000011950 const u8 *fw_ver;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011951
11952 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11953 return -EINVAL;
11954
11955 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11956 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11957
11958 /* Make sure none of the offsets and sizes make us read beyond
11959 * the end of the firmware data */
11960 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11961 offset = be32_to_cpu(sections[i].offset);
11962 len = be32_to_cpu(sections[i].len);
11963 if (offset + len > firmware->size) {
Joe Perches7995c642010-02-17 15:01:52 +000011964 pr_err("Section %d length is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011965 return -EINVAL;
11966 }
11967 }
11968
11969 /* Likewise for the init_ops offsets */
11970 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11971 ops_offsets = (u16 *)(firmware->data + offset);
11972 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11973
11974 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11975 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
Joe Perches7995c642010-02-17 15:01:52 +000011976 pr_err("Section offset %d is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011977 return -EINVAL;
11978 }
11979 }
11980
11981 /* Check FW version */
11982 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11983 fw_ver = firmware->data + offset;
11984 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11985 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11986 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11987 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
Joe Perches7995c642010-02-17 15:01:52 +000011988 pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011989 fw_ver[0], fw_ver[1], fw_ver[2],
11990 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11991 BCM_5710_FW_MINOR_VERSION,
11992 BCM_5710_FW_REVISION_VERSION,
11993 BCM_5710_FW_ENGINEERING_VERSION);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000011994 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011995 }
11996
11997 return 0;
11998}
11999
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012000static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012001{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012002 const __be32 *source = (const __be32 *)_source;
12003 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012004 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012005
12006 for (i = 0; i < n/4; i++)
12007 target[i] = be32_to_cpu(source[i]);
12008}
12009
12010/*
12011 Ops array is stored in the following format:
12012 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12013 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012014static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012015{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012016 const __be32 *source = (const __be32 *)_source;
12017 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012018 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012019
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012020 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012021 tmp = be32_to_cpu(source[j]);
12022 target[i].op = (tmp >> 24) & 0xff;
12023 target[i].offset = tmp & 0xffffff;
12024 target[i].raw_data = be32_to_cpu(source[j+1]);
12025 }
12026}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012027
12028static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012029{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012030 const __be16 *source = (const __be16 *)_source;
12031 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012032 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012033
12034 for (i = 0; i < n/2; i++)
12035 target[i] = be16_to_cpu(source[i]);
12036}
12037
/* Allocate bp->arr sized from the firmware header and fill it by running
 * 'func' over the corresponding firmware section; jumps to 'lbl' on
 * allocation failure. Expects 'bp' and 'fw_hdr' in the caller's scope.
 * Fix: 'len' is u32, so print it with %u rather than %d.
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %u bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012049
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012050static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12051{
Ben Hutchings45229b42009-11-07 11:53:39 +000012052 const char *fw_file_name;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012053 struct bnx2x_fw_file_hdr *fw_hdr;
Ben Hutchings45229b42009-11-07 11:53:39 +000012054 int rc;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012055
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012056 if (CHIP_IS_E1(bp))
Ben Hutchings45229b42009-11-07 11:53:39 +000012057 fw_file_name = FW_FILE_NAME_E1;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012058 else
Ben Hutchings45229b42009-11-07 11:53:39 +000012059 fw_file_name = FW_FILE_NAME_E1H;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012060
Joe Perches7995c642010-02-17 15:01:52 +000012061 pr_info("Loading %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012062
12063 rc = request_firmware(&bp->firmware, fw_file_name, dev);
12064 if (rc) {
Joe Perches7995c642010-02-17 15:01:52 +000012065 pr_err("Can't load firmware file %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012066 goto request_firmware_exit;
12067 }
12068
12069 rc = bnx2x_check_firmware(bp);
12070 if (rc) {
Joe Perches7995c642010-02-17 15:01:52 +000012071 pr_err("Corrupt firmware file %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012072 goto request_firmware_exit;
12073 }
12074
12075 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12076
12077 /* Initialize the pointers to the init arrays */
12078 /* Blob */
12079 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12080
12081 /* Opcodes */
12082 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12083
12084 /* Offsets */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012085 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12086 be16_to_cpu_n);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012087
12088 /* STORMs firmware */
Eilon Greenstein573f2032009-08-12 08:24:14 +000012089 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12090 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12091 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
12092 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12093 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12094 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12095 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
12096 be32_to_cpu(fw_hdr->usem_pram_data.offset);
12097 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12098 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12099 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
12100 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12101 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12102 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12103 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
12104 be32_to_cpu(fw_hdr->csem_pram_data.offset);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012105
12106 return 0;
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +000012107
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012108init_offsets_alloc_err:
12109 kfree(bp->init_ops);
12110init_ops_alloc_err:
12111 kfree(bp->init_data);
12112request_firmware_exit:
12113 release_firmware(bp->firmware);
12114
12115 return rc;
12116}
12117
12118
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012119static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12120 const struct pci_device_id *ent)
12121{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012122 struct net_device *dev = NULL;
12123 struct bnx2x *bp;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012124 int pcie_width, pcie_speed;
Eliezer Tamir25047952008-02-28 11:50:16 -080012125 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012126
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012127 /* dev zeroed in init_etherdev */
Eilon Greenstein555f6c72009-02-12 08:36:11 +000012128 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012129 if (!dev) {
Joe Perches7995c642010-02-17 15:01:52 +000012130 pr_err("Cannot allocate net device\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012131 return -ENOMEM;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012132 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012133
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012134 bp = netdev_priv(dev);
Joe Perches7995c642010-02-17 15:01:52 +000012135 bp->msg_enable = debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012136
Eilon Greensteindf4770de2009-08-12 08:23:28 +000012137 pci_set_drvdata(pdev, dev);
12138
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012139 rc = bnx2x_init_dev(pdev, dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012140 if (rc < 0) {
12141 free_netdev(dev);
12142 return rc;
12143 }
12144
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012145 rc = bnx2x_init_bp(bp);
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000012146 if (rc)
12147 goto init_one_exit;
12148
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012149 /* Set init arrays */
12150 rc = bnx2x_init_firmware(bp, &pdev->dev);
12151 if (rc) {
Joe Perches7995c642010-02-17 15:01:52 +000012152 pr_err("Error loading firmware\n");
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012153 goto init_one_exit;
12154 }
12155
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000012156 rc = register_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012157 if (rc) {
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000012158 dev_err(&pdev->dev, "Cannot register net device\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012159 goto init_one_exit;
12160 }
12161
Eilon Greenstein37f9ce62009-08-12 08:23:34 +000012162 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
Joe Perches7995c642010-02-17 15:01:52 +000012163 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
12164 board_info[ent->driver_data].name,
12165 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12166 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12167 dev->base_addr, bp->pdev->irq, dev->dev_addr);
Eilon Greensteinc0162012009-03-02 08:01:05 +000012168
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012169 return 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012170
12171init_one_exit:
12172 if (bp->regview)
12173 iounmap(bp->regview);
12174
12175 if (bp->doorbells)
12176 iounmap(bp->doorbells);
12177
12178 free_netdev(dev);
12179
12180 if (atomic_read(&pdev->enable_cnt) == 1)
12181 pci_release_regions(pdev);
12182
12183 pci_disable_device(pdev);
12184 pci_set_drvdata(pdev, NULL);
12185
12186 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012187}
12188
/**
 * bnx2x_remove_one - tear down a device probed by bnx2x_init_one()
 * @pdev: PCI device being removed
 *
 * Reverses bnx2x_init_one(): unregisters the netdev, frees the firmware
 * image and the init arrays derived from it, unmaps the BARs and
 * releases the PCI resources.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	/* Detach from the stack before freeing anything it might touch */
	unregister_netdev(dev);

	/* Release the firmware blob and the init arrays parsed from it */
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* Only the last enable of this function releases the regions */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
12221
/**
 * bnx2x_suspend - PCI power-management suspend callback
 * @pdev: PCI device
 * @state: target system sleep state
 *
 * Saves PCI config space and, if the interface is up, detaches it from
 * the stack, unloads the NIC and drops the device into the power state
 * chosen for @state.
 */
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	/* Nothing more to do when the interface is down */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
12252
/**
 * bnx2x_resume - PCI power-management resume callback
 * @pdev: PCI device
 *
 * Restores PCI config space and, if the interface was up at suspend
 * time, powers the device back to D0, reattaches it to the stack and
 * reloads the NIC.
 */
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	/* Nothing to reload when the interface was down at suspend */
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
12283
/**
 * bnx2x_eeh_nic_unload - minimal NIC teardown after a PCI bus error
 * @bp: driver instance
 *
 * Lightweight variant of the normal unload path for EEH recovery: the
 * adapter may be unreachable, so only software state is torn down and
 * no slow-path ramrods are sent to the chip.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	/* Stop the periodic timer and statistics collection */
	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Invalidate the locally cached MAC CAM entries (E1 chips only) */
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
12323
/**
 * bnx2x_eeh_recover - re-read shared-memory state after a slot reset
 * @bp: driver instance
 *
 * Re-discovers the shmem base address and checks the MCP validity
 * signature.  If the MCP does not look active, NO_MCP_FLAG is set and
 * the firmware sequence number is left untouched.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* A live MCP places shmem inside the 0xA0000-0xBFFFF window */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	/* Resync the driver/MCP mailbox sequence number */
	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
12353
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	/* The link is gone for good - recovery is not possible */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
12387
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Restore bus mastering and the config space saved at suspend */
	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	/* The NIC itself is reloaded later, in bnx2x_io_resume() */
	return PCI_ERS_RESULT_RECOVERED;
}
12418
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	/* Re-read MCP/shmem state before touching the device */
	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
12442
/* PCI error-recovery (EEH/AER) callbacks */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};
12448
/* Driver registration descriptor, including PM and error handlers */
static struct pci_driver bnx2x_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2x_pci_tbl,
	.probe = bnx2x_init_one,
	.remove = __devexit_p(bnx2x_remove_one),
	.suspend = bnx2x_suspend,
	.resume = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
12458
12459static int __init bnx2x_init(void)
12460{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000012461 int ret;
12462
Joe Perches7995c642010-02-17 15:01:52 +000012463 pr_info("%s", version);
Eilon Greenstein938cf542009-08-12 08:23:37 +000012464
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080012465 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12466 if (bnx2x_wq == NULL) {
Joe Perches7995c642010-02-17 15:01:52 +000012467 pr_err("Cannot create workqueue\n");
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080012468 return -ENOMEM;
12469 }
12470
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000012471 ret = pci_register_driver(&bnx2x_pci_driver);
12472 if (ret) {
Joe Perches7995c642010-02-17 15:01:52 +000012473 pr_err("Cannot register driver\n");
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000012474 destroy_workqueue(bnx2x_wq);
12475 }
12476 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012477}
12478
/* Module unload: unhook from the PCI core first so no further work can
 * be queued, then destroy the slow-path workqueue.
 */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
12485
/* Module entry/exit points */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
12488
Michael Chan993ac7b2009-10-10 13:46:56 +000012489#ifdef BCM_CNIC
12490
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	/* Credit back the completions, then drain queued CNIC kwqes into
	 * the slow-path queue while posting budget remains.
	 */
	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		/* Staging ring empty - nothing left to post */
		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* Advance the consumer with wrap-around */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
12526
/**
 * bnx2x_cnic_sp_queue - accept CNIC 16-byte kwqes for slow-path posting
 * @dev: net device owned by bnx2x
 * @kwqes: array of kwqes to queue
 * @count: number of entries in @kwqes
 *
 * Copies the kwqes into the driver's staging ring under spq_lock and
 * kicks the slow-path post if posting budget remains.  Returns the
 * number of kwqes actually accepted, which may be less than @count when
 * the staging ring fills up.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* Staging ring full - accept only what fits */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		/* Advance the producer with wrap-around */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
12569
12570static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12571{
12572 struct cnic_ops *c_ops;
12573 int rc = 0;
12574
12575 mutex_lock(&bp->cnic_mutex);
12576 c_ops = bp->cnic_ops;
12577 if (c_ops)
12578 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12579 mutex_unlock(&bp->cnic_mutex);
12580
12581 return rc;
12582}
12583
12584static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12585{
12586 struct cnic_ops *c_ops;
12587 int rc = 0;
12588
12589 rcu_read_lock();
12590 c_ops = rcu_dereference(bp->cnic_ops);
12591 if (c_ops)
12592 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12593 rcu_read_unlock();
12594
12595 return rc;
12596}
12597
12598/*
12599 * for commands that have no data
12600 */
12601static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12602{
12603 struct cnic_ctl_info ctl = {0};
12604
12605 ctl.cmd = cmd;
12606
12607 return bnx2x_cnic_ctl_send(bp, &ctl);
12608}
12609
/* Notify the CNIC driver of a completion for @cid, then credit it back
 * to the slow-path queue.
 */
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}
12621
12622static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12623{
12624 struct bnx2x *bp = netdev_priv(dev);
12625 int rc = 0;
12626
12627 switch (ctl->cmd) {
12628 case DRV_CTL_CTXTBL_WR_CMD: {
12629 u32 index = ctl->data.io.offset;
12630 dma_addr_t addr = ctl->data.io.dma_addr;
12631
12632 bnx2x_ilt_wr(bp, index, addr);
12633 break;
12634 }
12635
12636 case DRV_CTL_COMPLETION_CMD: {
12637 int count = ctl->data.comp.comp_count;
12638
12639 bnx2x_cnic_sp_post(bp, count);
12640 break;
12641 }
12642
12643 /* rtnl_lock is held. */
12644 case DRV_CTL_START_L2_CMD: {
12645 u32 cli = ctl->data.ring.client_id;
12646
12647 bp->rx_mode_cl_mask |= (1 << cli);
12648 bnx2x_set_storm_rx_mode(bp);
12649 break;
12650 }
12651
12652 /* rtnl_lock is held. */
12653 case DRV_CTL_STOP_L2_CMD: {
12654 u32 cli = ctl->data.ring.client_id;
12655
12656 bp->rx_mode_cl_mask &= ~(1 << cli);
12657 bnx2x_set_storm_rx_mode(bp);
12658 break;
12659 }
12660
12661 default:
12662 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12663 rc = -EINVAL;
12664 }
12665
12666 return rc;
12667}
12668
12669static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12670{
12671 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12672
12673 if (bp->flags & USING_MSIX_FLAG) {
12674 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12675 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12676 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12677 } else {
12678 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12679 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12680 }
12681 cp->irq_arr[0].status_blk = bp->cnic_sb;
12682 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12683 cp->irq_arr[1].status_blk = bp->def_status_blk;
12684 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12685
12686 cp->num_irq = 2;
12687}
12688
/**
 * bnx2x_register_cnic - attach the CNIC driver to this device
 * @dev: net device owned by bnx2x
 * @ops: CNIC callback table
 * @data: opaque cookie passed back in the callbacks
 *
 * Allocates the kwqe staging ring, sets up the CNIC status block and
 * IRQ info, and publishes @ops last (via rcu_assign_pointer) so RCU
 * readers never observe partially initialized state.  Returns 0 on
 * success or a negative errno.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* Interrupts are masked - the device cannot be used right now */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* Empty staging ring: cons == prod, last marks the wrap point */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* Publish last - readers may use everything set up above */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
12726
/**
 * bnx2x_unregister_cnic - detach the CNIC driver from this device
 * @dev: net device owned by bnx2x
 *
 * Clears the iSCSI MAC if it was set, unpublishes the ops pointer and
 * waits for in-flight RCU readers (bnx2x_cnic_ctl_send_bh) to drain
 * before freeing the kwqe staging ring.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* No reader may still hold the old ops when the ring is freed */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
12746
12747struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12748{
12749 struct bnx2x *bp = netdev_priv(dev);
12750 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12751
12752 cp->drv_owner = THIS_MODULE;
12753 cp->chip_id = CHIP_ID(bp);
12754 cp->pdev = bp->pdev;
12755 cp->io_base = bp->regview;
12756 cp->io_base2 = bp->doorbells;
12757 cp->max_kwqe_pending = 8;
12758 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12759 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12760 cp->ctx_tbl_len = CNIC_ILT_LINES;
12761 cp->starting_cid = BCM_CNIC_CID_START;
12762 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12763 cp->drv_ctl = bnx2x_drv_ctl;
12764 cp->drv_register_cnic = bnx2x_register_cnic;
12765 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12766
12767 return cp;
12768}
12769EXPORT_SYMBOL(bnx2x_cnic_probe);
12770
12771#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012772